Columns: id (int64, 0 to 190k), prompt (string, lengths 21 to 13.4M), docstring (string, lengths 1 to 12k)
173,080
from __future__ import annotations import itertools from typing import ( Any, Callable, Hashable, Literal, Sequence, TypeVar, cast, ) import warnings import weakref import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( algos as libalgos, internals as libinternals, lib, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, QuantileInterpolation, Shape, npt, type_t, ) from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import infer_dtype_from_scalar from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_dtype_equal, is_list_like, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( array_equals, isna, ) import pandas.core.algorithms as algos from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.sparse import SparseDtype import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.api import ( Index, ensure_index, ) from pandas.core.internals.base import ( DataManager, SingleDataManager, interleaved_dtype, ) from pandas.core.internals.blocks import ( Block, NumpyBlock, ensure_block_shape, extend_blocks, get_block_type, new_block, new_block_2d, ) from pandas.core.internals.ops import ( blockwise_all, operate_blockwise, ) def _merge_blocks( blocks: list[Block], dtype: DtypeObj, can_consolidate: bool ) -> tuple[list[Block], bool]: if len(blocks) == 1: return blocks, False if can_consolidate: # TODO: optimization potential in case all mgrs contain slices and # combination of those slices is a slice, too. new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) new_values: ArrayLike if isinstance(blocks[0].dtype, np.dtype): # error: List comprehension has incompatible type List[Union[ndarray, # ExtensionArray]]; expected List[Union[complex, generic, # Sequence[Union[int, float, complex, str, bytes, generic]], # Sequence[Sequence[Any]], SupportsArray]] new_values = np.vstack([b.values for b in blocks]) # type: ignore[misc] else: bvals = [blk.values for blk in blocks] bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals) new_values = bvals2[0]._concat_same_type(bvals2, axis=0) argsort = np.argsort(new_mgr_locs) new_values = new_values[argsort] new_mgr_locs = new_mgr_locs[argsort] bp = BlockPlacement(new_mgr_locs) return [new_block_2d(new_values, placement=bp)], True # can't consolidate --> no merge return blocks, False class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure Index-ignorant; let the container take care of that """ values: np.ndarray | ExtensionArray ndim: int refs: BlockValuesRefs __init__: Callable __slots__ = () is_numeric = False is_object = False is_extension = False _can_consolidate = True _validate_ndim = True def _consolidate_key(self): return self._can_consolidate, self.dtype.name def _can_hold_na(self) -> bool: """ Can we store NA values in this Block? 
""" dtype = self.dtype if isinstance(dtype, np.dtype): return dtype.kind not in ["b", "i", "u"] return dtype._can_hold_na def is_bool(self) -> bool: """ We can be bool if a) we are bool dtype or b) object dtype with bool objects. """ return self.values.dtype == np.dtype(bool) def external_values(self): return external_values(self.values) def fill_value(self): # Used in reindex_indexer return na_value_for_dtype(self.dtype, compat=False) def _standardize_fill_value(self, value): # if we are passed a scalar None, convert it here if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): value = self.fill_value return value def mgr_locs(self) -> BlockPlacement: return self._mgr_locs def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: self._mgr_locs = new_mgr_locs def make_block( self, values, placement=None, refs: BlockValuesRefs | None = None ) -> Block: """ Create a new block, with type inference propagate any values that are not specified """ if placement is None: placement = self._mgr_locs if self.is_extension: values = ensure_block_shape(values, ndim=self.ndim) # TODO: perf by not going through new_block # We assume maybe_coerce_values has already been called return new_block(values, placement=placement, ndim=self.ndim, refs=refs) def make_block_same_class( self, values, placement: BlockPlacement | None = None, refs: BlockValuesRefs | None = None, ) -> Block: """Wrap given values in a block of same type as self.""" # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet # relied on it, as of 2.0 the caller is responsible for this. if placement is None: placement = self._mgr_locs # We assume maybe_coerce_values has already been called return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) def __repr__(self) -> str: # don't want to print out all of the items here name = type(self).__name__ if self.ndim == 1: result = f"{name}: {len(self)} dtype: {self.dtype}" else: shape = " x ".join([str(s) for s in self.shape]) result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" return result def __len__(self) -> int: return len(self.values) def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ # Note: the only place where we are called with ndarray[intp] # is from internals.concat, and we can verify that never happens # with 1-column blocks, i.e. never for ExtensionBlock. new_mgr_locs = self._mgr_locs[slicer] new_values = self._slice(slicer) refs = self.refs if isinstance(slicer, slice) else None return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) def getitem_block_columns( self, slicer: slice, new_mgr_locs: BlockPlacement ) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ new_values = self._slice(slicer) if new_values.ndim != self.values.ndim: raise ValueError("Only same dim slicing is allowed") return type(self)(new_values, new_mgr_locs, self.ndim, refs=self.refs) def _can_hold_element(self, element: Any) -> bool: """require the same dtype as ourselves""" element = extract_array(element, extract_numpy=True) return can_hold_element(self.values, element) def should_store(self, value: ArrayLike) -> bool: """ Should we set self.values[indexer] = value inplace or do we need to cast? 
Parameters ---------- value : np.ndarray or ExtensionArray Returns ------- bool """ # faster equivalent to is_dtype_equal(value.dtype, self.dtype) try: return value.dtype == self.dtype except TypeError: return False # --------------------------------------------------------------------- # Apply/Reduce and Helpers def apply(self, func, **kwargs) -> list[Block]: """ apply the function to my values; return a block if we are not one """ result = func(self.values, **kwargs) return self._split_op_result(result) def reduce(self, func) -> list[Block]: # We will apply the function and reshape the result into a single-row # Block with the same mgr_locs; squeezing will be done at a higher level assert self.ndim == 2 result = func(self.values) if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: res_values = result.reshape(-1, 1) nb = self.make_block(res_values) return [nb] def _split_op_result(self, result: ArrayLike) -> list[Block]: # See also: split_and_operate if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): # TODO(EA2D): unnecessary with 2D EAs # if we get a 2D ExtensionArray, we need to split it into 1D pieces nbs = [] for i, loc in enumerate(self._mgr_locs): if not is_1d_only_ea_obj(result): vals = result[i : i + 1] else: vals = result[i] block = self.make_block(values=vals, placement=loc) nbs.append(block) return nbs nb = self.make_block(result) return [nb] def _split(self) -> list[Block]: """ Split a block into a list of single-column blocks. """ assert self.ndim == 2 new_blocks = [] for i, ref_loc in enumerate(self._mgr_locs): vals = self.values[slice(i, i + 1)] bp = BlockPlacement(ref_loc) nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) new_blocks.append(nb) return new_blocks def split_and_operate(self, func, *args, **kwargs) -> list[Block]: """ Split the block and apply func column-by-column. Parameters ---------- func : Block method *args **kwargs Returns ------- List[Block] """ assert self.ndim == 2 and self.shape[0] != 1 res_blocks = [] for nb in self._split(): rbs = func(nb, *args, **kwargs) res_blocks.extend(rbs) return res_blocks # --------------------------------------------------------------------- # Up/Down-casting def coerce_to_target_dtype(self, other) -> Block: """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ new_dtype = find_result_type(self.values, other) return self.astype(new_dtype, copy=False) def _maybe_downcast( self, blocks: list[Block], downcast=None, using_cow: bool = False ) -> list[Block]: if downcast is False: return blocks if self.dtype == _dtype_obj: # TODO: does it matter that self.dtype might not match blocks[i].dtype? # GH#44241 We downcast regardless of the argument; # respecting 'downcast=None' may be worthwhile at some point, # but ATM it breaks too much existing code. # split and convert the blocks return extend_blocks( [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] ) if downcast is None: return blocks return extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: """ downcast specialized to 2D case post-validation. Refactored to allow use of maybe_split. 
""" new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) refs = self.refs if using_cow and new_values is self.values else None return [self.make_block(new_values, refs=refs)] def convert( self, *, copy: bool = True, using_cow: bool = False, ) -> list[Block]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ if not copy and using_cow: return [self.copy(deep=False)] return [self.copy()] if copy else [self] # --------------------------------------------------------------------- # Array-Like Methods def dtype(self) -> DtypeObj: return self.values.dtype def astype( self, dtype: DtypeObj, copy: bool = False, errors: IgnoreRaise = "raise", using_cow: bool = False, ) -> Block: """ Coerce to the new dtype. Parameters ---------- dtype : np.dtype or ExtensionDtype copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object using_cow: bool, default False Signaling if copy on write copy logic is used. Returns ------- Block """ values = self.values new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) new_values = maybe_coerce_values(new_values) refs = None if using_cow and astype_is_view(values.dtype, new_values.dtype): refs = self.refs newb = self.make_block(new_values, refs=refs) if newb.shape != self.shape: raise TypeError( f"cannot set astype for copy = [{copy}] for dtype " f"({self.dtype.name} [{self.shape}]) to different shape " f"({newb.dtype.name} [{newb.shape}])" ) return newb def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block: """convert to our native types format""" result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) return self.make_block(result) def copy(self, deep: bool = True) -> Block: """copy constructor""" values = self.values refs: BlockValuesRefs | None if deep: values = values.copy() refs = None else: refs = self.refs return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) # --------------------------------------------------------------------- # Replace def replace( self, to_replace, value, inplace: bool = False, # mask may be pre-computed if we're called from replace_list mask: npt.NDArray[np.bool_] | None = None, using_cow: bool = False, ) -> list[Block]: """ replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. """ # Note: the checks we do in NDFrame.replace ensure we never get # here with listlike to_replace or value, as those cases # go through replace_list values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and (self.refs.has_reference() or not inplace): blk = self.copy() elif using_cow: blk = self.copy(deep=False) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=to_replace, value=value, inplace=True) return [blk] if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that # replacing it is a no-op. # Note: If to_replace were a list, NDFrame.replace would call # replace_list instead of replace. 
if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] if mask is None: mask = missing.mask_missing(values, to_replace) if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly # bc _can_hold_element is incorrect. if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] elif self._can_hold_element(value): # TODO(CoW): Maybe split here as well into columns where mask has True # and rest? if using_cow: if inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self.copy() else: blk = self if inplace else self.copy() putmask_inplace(blk.values, mask, value) if not (self.is_object and value is None): # if the user *explicitly* gave None, we keep None, otherwise # may downcast to NaN blocks = blk.convert(copy=False, using_cow=using_cow) else: blocks = [blk] return blocks elif self.ndim == 1 or self.shape[0] == 1: if value is None or value is NA: blk = self.astype(np.dtype(object)) else: blk = self.coerce_to_target_dtype(value) return blk.replace( to_replace=to_replace, value=value, inplace=True, mask=mask, ) else: # split so that we only upcast where necessary blocks = [] for i, nb in enumerate(self._split()): blocks.extend( type(self).replace( nb, to_replace=to_replace, value=value, inplace=True, mask=mask[i : i + 1], using_cow=using_cow, ) ) return blocks def _replace_regex( self, to_replace, value, inplace: bool = False, mask=None, using_cow: bool = False, ) -> list[Block]: """ Replace elements by the given value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. inplace : bool, default False Perform inplace modification. mask : array-like of bool, optional True indicate corresponding element is ignored. using_cow: bool, default False Specifying if copy on write is enabled. Returns ------- List[Block] """ if not self._can_hold_element(to_replace): # i.e. only ObjectBlock, but could in principle include a # String ExtensionBlock if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] rx = re.compile(to_replace) if using_cow: if inplace and not self.refs.has_reference(): refs = self.refs new_values = self.values else: refs = None new_values = self.values.copy() else: refs = None new_values = self.values if inplace else self.values.copy() replace_regex(new_values, rx, value, mask) block = self.make_block(new_values, refs=refs) return block.convert(copy=False, using_cow=using_cow) def replace_list( self, src_list: Iterable[Any], dest_list: Sequence[Any], inplace: bool = False, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ See BlockManager.replace_list docstring. 
""" values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=src_list, value=dest_list, inplace=True) return [blk] # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) ] if not len(pairs): if using_cow: return [self.copy(deep=False)] # shortcut, nothing to replace return [self] if inplace else [self.copy()] src_len = len(pairs) - 1 if is_string_dtype(values.dtype): # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations na_mask = ~isna(values) masks: Iterable[npt.NDArray[np.bool_]] = ( extract_bool_array( cast( ArrayLike, compare_or_regex_search( values, s[0], regex=regex, mask=na_mask ), ) ) for s in pairs ) else: # GH#38086 faster if we know we dont need to check for regex masks = (missing.mask_missing(values, s[0]) for s in pairs) # Materialize if inplace = True, since the masks can change # as we replace if inplace: masks = list(masks) if using_cow and inplace: # Don't set up refs here, otherwise we will think that we have # references when we check again later rb = [self] else: rb = [self if inplace else self.copy()] for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): convert = i == src_len # only convert once at the end new_rb: list[Block] = [] # GH-39338: _replace_coerce can split a block into # single-column blocks, so track the index so we know # where to index into the mask for blk_num, blk in enumerate(rb): if len(rb) == 1: m = mask else: mib = mask assert not isinstance(mib, bool) m = mib[blk_num : blk_num + 1] # error: Argument "mask" to "_replace_coerce" of "Block" has # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; # expected "ndarray[Any, dtype[bool_]]" result = blk._replace_coerce( to_replace=src, value=dest, mask=m, inplace=inplace, regex=regex, using_cow=using_cow, ) if convert and blk.is_object and not all(x is None for x in dest_list): # GH#44498 avoid unwanted cast-back result = extend_blocks( [ b.convert(copy=True and not using_cow, using_cow=using_cow) for b in result ] ) new_rb.extend(result) rb = new_rb return rb def _replace_coerce( self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool = True, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. 
Returns ------- List[Block] """ if should_use_regex(regex, to_replace): return self._replace_regex( to_replace, value, inplace=inplace, mask=mask, ) else: if value is None: # gh-45601, gh-45836, gh-46634 if mask.any(): has_ref = self.refs.has_reference() nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) if (nb is self or using_cow) and not inplace: nb = nb.copy() elif inplace and has_ref and nb.refs.has_reference(): # no copy in astype and we had refs before nb = nb.copy() putmask_inplace(nb.values, mask, value) return [nb] if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] return self.replace( to_replace=to_replace, value=value, inplace=inplace, mask=mask, using_cow=using_cow, ) # --------------------------------------------------------------------- # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock # but not ExtensionBlock def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: """ For compatibility with 1D-only ExtensionArrays. """ return arg def _unwrap_setitem_indexer(self, indexer): """ For compatibility with 1D-only ExtensionArrays. """ return indexer # NB: this cannot be made cache_readonly because in mgr.set_values we pin # new .values that can have different shape GH#42631 def shape(self) -> Shape: return self.values.shape def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: # In the case where we have a tuple[slice, int], the slice will always # be slice(None) # Note: only reached with self.ndim == 2 # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type # "Union[int, integer[Any]]" return self.values[i] # type: ignore[index] def _slice( self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] ) -> ArrayLike: """return a slice of my values""" return self.values[slicer] def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: """ Modify block values in-place with new item value. If copy=True, first copy the underlying values in place before modifying (for Copy-on-Write). Notes ----- `set_inplace` never creates a new array or new Block, whereas `setitem` _may_ create a new array and always creates a new Block. Caller is responsible for checking values.dtype == self.dtype. """ if copy: self.values = self.values.copy() self.values[locs] = values def take_nd( self, indexer: npt.NDArray[np.intp], axis: AxisInt, new_mgr_locs: BlockPlacement | None = None, fill_value=lib.no_default, ) -> Block: """ Take values according to indexer and return them as a block. """ values = self.values if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: allow_fill = True # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype new_values = algos.take_nd( values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value ) # Called from three places in managers, all of which satisfy # these assertions if isinstance(self, ExtensionBlock): # NB: in this case, the 'axis' kwarg will be ignored in the # algos.take_nd call above. 
assert not (self.ndim == 1 and new_mgr_locs is None) assert not (axis == 0 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self._mgr_locs if not is_dtype_equal(new_values.dtype, self.dtype): return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def _unstack( self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_], ): """ Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack new_placement : np.ndarray[np.intp] allow_fill : bool needs_masking : np.ndarray[bool] Returns ------- blocks : list of Block New blocks of unstacked values. mask : array-like of bool The mask of columns of `blocks` we should keep. """ new_values, mask = unstacker.get_new_values( self.values.T, fill_value=fill_value ) mask = mask.any(0) # TODO: in all tests we have mask.all(); can we rely on that? # Note: these next two lines ensure that # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) # which the calling function needs in order to pass verify_integrity=False # to the BlockManager constructor new_values = new_values.T[mask] new_placement = new_placement[mask] bp = BlockPlacement(new_placement) blocks = [new_block_2d(new_values, placement=bp)] return blocks, mask # --------------------------------------------------------------------- def setitem(self, indexer, value, using_cow: bool = False) -> Block: """ Attempt self.values[indexer] = value, possibly creating a new array. Parameters ---------- indexer : tuple, list-like, array-like, slice, int The subset of self.values to set value : object The value being set using_cow: bool, default False Signaling if CoW is used. Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ value = self._standardize_fill_value(value) values = cast(np.ndarray, self.values) if self.ndim == 2: values = values.T # length checking check_setitem_lengths(indexer, value, values) value = extract_array(value, extract_numpy=True) try: casted = np_can_hold_element(values.dtype, value) except LossySetitemError: # current dtype cannot store value, coerce to common dtype nb = self.coerce_to_target_dtype(value) return nb.setitem(indexer, value) else: if self.dtype == _dtype_obj: # TODO: avoid having to construct values[indexer] vi = values[indexer] if lib.is_list_like(vi): # checking lib.is_scalar here fails on # test_iloc_setitem_custom_object casted = setitem_datetimelike_compat(values, len(vi), casted) if using_cow and self.refs.has_reference(): values = values.copy() self = self.make_block_same_class( values.T if values.ndim == 2 else values ) values[indexer] = casted return self def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: """ putmask the data to the block; it is possible that we may create a new dtype of block Return the resulting block(s). 
Parameters ---------- mask : np.ndarray[bool], SparseArray[bool], or BooleanArray new : a ndarray/object using_cow: bool, default False Returns ------- List[Block] """ orig_mask = mask values = cast(np.ndarray, self.values) mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) if new is lib.no_default: new = self.fill_value new = self._standardize_fill_value(new) new = extract_array(new, extract_numpy=True) if noop: if using_cow: return [self.copy(deep=False)] return [self] try: casted = np_can_hold_element(values.dtype, new) if using_cow and self.refs.has_reference(): # Do this here to avoid copying twice values = values.copy() self = self.make_block_same_class(values) putmask_without_repeat(values.T, mask, casted) if using_cow: return [self.copy(deep=False)] return [self] except LossySetitemError: if self.ndim == 1 or self.shape[0] == 1: # no need to split columns if not is_list_like(new): # using just new[indexer] can't save us the need to cast return self.coerce_to_target_dtype(new).putmask(mask, new) else: indexer = mask.nonzero()[0] nb = self.setitem(indexer, new[indexer], using_cow=using_cow) return [nb] else: is_array = isinstance(new, np.ndarray) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): n = new if is_array: # we have a different value per-column n = new[:, i : i + 1] submask = orig_mask[:, i : i + 1] rbs = nb.putmask(submask, n, using_cow=using_cow) res_blocks.extend(rbs) return res_blocks def where( self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False ) -> list[Block]: """ evaluate the block; return result block(s) from the result Parameters ---------- other : a ndarray/object cond : np.ndarray[bool], SparseArray[bool], or BooleanArray _downcast : str or None, default "infer" Private because we only specify it when calling from fillna. Returns ------- List[Block] """ assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) transpose = self.ndim == 2 cond = extract_bool_array(cond) # EABlocks override where values = cast(np.ndarray, self.values) orig_other = other if transpose: values = values.T icond, noop = validate_putmask(values, ~cond) if noop: # GH-39595: Always return a copy; short-circuit up/downcasting if using_cow: return [self.copy(deep=False)] return [self.copy()] if other is lib.no_default: other = self.fill_value other = self._standardize_fill_value(other) try: # try/except here is equivalent to a self._can_hold_element check, # but this gets us back 'casted' which we will re-use below; # without using 'casted', expressions.where may do unwanted upcasts. casted = np_can_hold_element(values.dtype, other) except (ValueError, TypeError, LossySetitemError): # we cannot coerce, return a compat dtype if self.ndim == 1 or self.shape[0] == 1: # no need to split columns block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, using_cow=using_cow) return self._maybe_downcast( blocks, downcast=_downcast, using_cow=using_cow ) else: # since _maybe_downcast would split blocks anyway, we # can avoid some potential upcast/downcast by splitting # on the front end. 
is_array = isinstance(other, (np.ndarray, ExtensionArray)) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): oth = other if is_array: # we have a different value per-column oth = other[:, i : i + 1] submask = cond[:, i : i + 1] rbs = nb.where( oth, submask, _downcast=_downcast, using_cow=using_cow ) res_blocks.extend(rbs) return res_blocks else: other = casted alt = setitem_datetimelike_compat(values, icond.sum(), other) if alt is not other: if is_list_like(other) and len(other) < len(values): # call np.where with other to get the appropriate ValueError np.where(~icond, values, other) raise NotImplementedError( "This should not be reached; call to np.where above is " "expected to raise ValueError. Please report a bug at " "github.com/pandas-dev/pandas" ) result = values.copy() np.putmask(result, icond, alt) else: # By the time we get here, we should have all Series/Index # args extracted to ndarray if ( is_list_like(other) and not isinstance(other, np.ndarray) and len(other) == self.shape[-1] ): # If we don't do this broadcasting here, then expressions.where # will broadcast a 1D other to be row-like instead of # column-like. other = np.array(other).reshape(values.shape) # If lengths don't match (or len(other)==1), we will raise # inside expressions.where, see test_series_where # Note: expressions.where may upcast. result = expressions.where(~icond, values, other) # The np_can_hold_element check _should_ ensure that we always # have result.dtype == self.dtype here. if transpose: result = result.T return [self.make_block(result)] def fillna( self, value, limit: int | None = None, inplace: bool = False, downcast=None, using_cow: bool = False, ) -> list[Block]: """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """ # Caller is responsible for validating limit; if int it is strictly positive inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # can short-circuit the isna call noop = True else: mask = isna(self.values) mask, noop = validate_putmask(self.values, mask) if noop: # we can't process the value, but nothing to do if inplace: if using_cow: return [self.copy(deep=False)] # Arbitrarily imposing the convention that we ignore downcast # on no-op when inplace=True return [self] else: # GH#45423 consistent downcasting on no-ops. nb = self.copy(deep=not using_cow) nbs = nb._maybe_downcast([nb], downcast=downcast, using_cow=using_cow) return nbs if limit is not None: mask[mask.cumsum(self.ndim - 1) > limit] = False if inplace: nbs = self.putmask(mask.T, value, using_cow=using_cow) else: # without _downcast, we would break # test_fillna_dtype_conversion_equiv_replace nbs = self.where(value, ~mask.T, _downcast=False) # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) # makes a difference bc blk may have object dtype, which has # different behavior in _maybe_downcast. 
return extend_blocks( [ blk._maybe_downcast([blk], downcast=downcast, using_cow=using_cow) for blk in nbs ] ) def interpolate( self, *, method: FillnaOptions = "pad", axis: AxisInt = 0, index: Index | None = None, inplace: bool = False, limit: int | None = None, limit_direction: str = "forward", limit_area: str | None = None, fill_value: Any | None = None, downcast: str | None = None, using_cow: bool = False, **kwargs, ) -> list[Block]: inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # If there are no NAs, then interpolate is a no-op if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] try: m = missing.clean_fill_method(method) except ValueError: m = None if m is None and self.dtype.kind != "f": # only deal with floats # bc we already checked that can_hold_na, we don't have int dtype here # test_interp_basic checks that we make a copy here if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] if self.is_object and self.ndim == 2 and self.shape[0] != 1 and axis == 0: # split improves performance in ndarray.copy() return self.split_and_operate( type(self).interpolate, method=method, axis=axis, index=index, inplace=inplace, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, downcast=downcast, **kwargs, ) refs = None if inplace: if using_cow and self.refs.has_reference(): data = self.values.copy() else: data = self.values refs = self.refs else: data = self.values.copy() data = cast(np.ndarray, data) # bc overridden by ExtensionBlock missing.interpolate_array_2d( data, method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, **kwargs, ) nb = self.make_block_same_class(data, refs=refs) return nb._maybe_downcast([nb], downcast, using_cow) def diff(self, n: int, axis: AxisInt = 1) -> list[Block]: """return block for the diff of the values""" # only reached with ndim == 2 and axis == 1 new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] def shift( self, periods: int, axis: AxisInt = 0, fill_value: Any = None ) -> list[Block]: """shift the block by periods, possibly upcast""" # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also # Note: periods is never 0 here, as that is handled at the top of # NDFrame.shift. If that ever changes, we can do a check for periods=0 # and possibly avoid coercing. 
if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj: # with object dtype there is nothing to promote, and the user can # pass pretty much any weird fill_value they like # see test_shift_object_non_scalar_fill raise ValueError("fill_value must be a scalar") fill_value = self._standardize_fill_value(fill_value) try: # error: Argument 1 to "np_can_hold_element" has incompatible type # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" casted = np_can_hold_element( self.dtype, fill_value # type: ignore[arg-type] ) except LossySetitemError: nb = self.coerce_to_target_dtype(fill_value) return nb.shift(periods, axis=axis, fill_value=fill_value) else: values = cast(np.ndarray, self.values) new_values = shift(values, periods, axis, casted) return [self.make_block(new_values)] def quantile( self, qs: Index, # with dtype float64 interpolation: QuantileInterpolation = "linear", axis: AxisInt = 0, ) -> Block: """ compute the quantiles of the Parameters ---------- qs : Index The quantiles to be computed in float64. interpolation : str, default 'linear' Type of interpolation. axis : int, default 0 Axis to compute. Returns ------- Block """ # We should always have ndim == 2 because Series dispatches to DataFrame assert self.ndim == 2 assert axis == 1 # only ever called this way assert is_list_like(qs) # caller is responsible for this result = quantile_compat(self.values, np.asarray(qs._values), interpolation) # ensure_block_shape needed for cases where we start with EA and result # is ndarray, e.g. IntegerArray, SparseArray result = ensure_block_shape(result, ndim=2) return new_block_2d(result, placement=self._mgr_locs) def round(self, decimals: int, using_cow: bool = False) -> Block: """ Rounds the values. If the block is not of an integer or float dtype, nothing happens. This is consistent with DataFrame.round behavivor. (Note: Series.round would raise) Parameters ---------- decimals: int, Number of decimal places to round to. Caller is responsible for validating this using_cow: bool, Whether Copy on Write is enabled right now """ if not self.is_numeric or self.is_bool: return self.copy(deep=not using_cow) refs = None # TODO: round only defined on BaseMaskedArray # Series also does this, so would need to fix both places # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]" # has no attribute "round" values = self.values.round(decimals) # type: ignore[union-attr] if values is self.values: refs = self.refs if not using_cow: # Normally would need to do this before, but # numpy only returns same array when round operation # is no-op # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636 values = values.copy() return self.make_block_same_class(values, refs=refs) # --------------------------------------------------------------------- # Abstract Methods Overridden By EABackedBlock and NumpyBlock def delete(self, loc) -> list[Block]: """Deletes the locs from the block. We split the block to avoid copying the underlying data. We create new blocks for every connected segment of the initial block that is not deleted. The new blocks point to the initial array. 
""" if not is_list_like(loc): loc = [loc] if self.ndim == 1: values = cast(np.ndarray, self.values) values = np.delete(values, loc) mgr_locs = self._mgr_locs.delete(loc) return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] if np.max(loc) >= self.values.shape[0]: raise IndexError # Add one out-of-bounds indexer as maximum to collect # all columns after our last indexer if any loc = np.concatenate([loc, [self.values.shape[0]]]) mgr_locs_arr = self._mgr_locs.as_array new_blocks: list[Block] = [] previous_loc = -1 # TODO(CoW): This is tricky, if parent block goes out of scope # all split blocks are referencing each other even though they # don't share data refs = self.refs if self.refs.has_reference() else None for idx in loc: if idx == previous_loc + 1: # There is no column between current and last idx pass else: # No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, slice]" values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa locs = mgr_locs_arr[previous_loc + 1 : idx] nb = type(self)( values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs ) new_blocks.append(nb) previous_loc = idx return new_blocks def is_view(self) -> bool: """return a boolean if I am possibly a view""" raise AbstractMethodError(self) def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. """ raise AbstractMethodError(self) def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ raise AbstractMethodError(self) def values_for_json(self) -> np.ndarray: raise AbstractMethodError(self) def extend_blocks(result, blocks=None) -> list[Block]: """return a new extended blocks, given the result""" if blocks is None: blocks = [] if isinstance(result, list): for r in result: if isinstance(r, list): blocks.extend(r) else: blocks.append(r) else: assert isinstance(result, Block), type(result) blocks.append(result) return blocks The provided code snippet includes necessary dependencies for implementing the `_consolidate` function. Write a Python function `def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]` to solve the following problem: Merge blocks having same dtype, exclude non-consolidating blocks Here is the function: def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]: """ Merge blocks having same dtype, exclude non-consolidating blocks """ # sort by _can_consolidate, dtype gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks: list[Block] = [] for (_can_consolidate, dtype), group_blocks in grouper: merged_blocks, _ = _merge_blocks( list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate ) new_blocks = extend_blocks(merged_blocks, new_blocks) return tuple(new_blocks)
Merge blocks having same dtype, exclude non-consolidating blocks
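The consolidation step above amounts to sorting blocks by their consolidate key, grouping on that key, and merging each same-dtype group. Below is a minimal standalone sketch of that pattern; the `Item` class and the `np.vstack` merge are hypothetical stand-ins for pandas `Block` objects and `_merge_blocks`, not part of the pandas API.

```python
import itertools
import numpy as np

class Item:
    """Hypothetical stand-in for a pandas Block: a dtype plus some values."""

    def __init__(self, dtype, values):
        self.dtype = np.dtype(dtype)
        self.values = np.asarray(values, dtype=self.dtype)

    @property
    def _consolidate_key(self):
        # (can_consolidate, dtype name), mirroring Block._consolidate_key
        return True, self.dtype.name


def consolidate(items):
    # Sort by the key so itertools.groupby sees each dtype as one contiguous group.
    gkey = lambda x: x._consolidate_key
    out = []
    for (_can_consolidate, _dtype), group in itertools.groupby(sorted(items, key=gkey), gkey):
        group = list(group)
        if len(group) == 1:
            out.append(group[0])
        else:
            # Merge same-dtype items into a single stacked item (stand-in for _merge_blocks).
            merged = np.vstack([it.values for it in group])
            out.append(Item(group[0].dtype, merged))
    return tuple(out)


blocks = [Item("int64", [1, 2]), Item("float64", [0.5]), Item("int64", [3, 4])]
print([b.values.shape for b in consolidate(blocks)])  # [(1,), (2, 2)] -- two items remain
```

Sorting by the key first is what lets `itertools.groupby` treat all blocks of one dtype as a single run, just as `_consolidate` does.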
173,081
from __future__ import annotations import itertools from typing import ( Any, Callable, Hashable, Literal, Sequence, TypeVar, cast, ) import warnings import weakref import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( algos as libalgos, internals as libinternals, lib, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, QuantileInterpolation, Shape, npt, type_t, ) from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import infer_dtype_from_scalar from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_dtype_equal, is_list_like, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( array_equals, isna, ) import pandas.core.algorithms as algos from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.sparse import SparseDtype import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.api import ( Index, ensure_index, ) from pandas.core.internals.base import ( DataManager, SingleDataManager, interleaved_dtype, ) from pandas.core.internals.blocks import ( Block, NumpyBlock, ensure_block_shape, extend_blocks, get_block_type, new_block, new_block_2d, ) from pandas.core.internals.ops import ( blockwise_all, operate_blockwise, ) The provided code snippet includes necessary dependencies for implementing the `_fast_count_smallints` function. Write a Python function `def _fast_count_smallints(arr: npt.NDArray[np.intp])` to solve the following problem: Faster version of set(arr) for sequences of small numbers. Here is the function: def _fast_count_smallints(arr: npt.NDArray[np.intp]): """Faster version of set(arr) for sequences of small numbers.""" counts = np.bincount(arr) nz = counts.nonzero()[0] # Note: list(zip(...) outperforms list(np.c_[nz, counts[nz]]) here, # in one benchmark by a factor of 11 return zip(nz, counts[nz])
Faster version of set(arr) for sequences of small numbers.
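A short usage sketch, restating the helper so it runs standalone: `np.bincount` assumes small non-negative integers, and keeping only the nonzero counts yields (value, count) pairs for the values that actually occur.

```python
import numpy as np
from collections import Counter

def fast_count_smallints(arr):
    # Count occurrences of small non-negative integers via np.bincount,
    # then keep only the values that actually appear in the input.
    counts = np.bincount(arr)
    nz = counts.nonzero()[0]
    return zip(nz, counts[nz])

arr = np.array([0, 3, 3, 7, 0, 3], dtype=np.intp)
print(list(fast_count_smallints(arr)))  # [(0, 2), (3, 3), (7, 1)]
print(Counter(arr.tolist()))            # same counts, without the bincount shortcut
```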
173,082
from __future__ import annotations import itertools from typing import ( Any, Callable, Hashable, Literal, Sequence, TypeVar, cast, ) import warnings import weakref import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( algos as libalgos, internals as libinternals, lib, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, QuantileInterpolation, Shape, npt, type_t, ) from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import infer_dtype_from_scalar from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_dtype_equal, is_list_like, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( array_equals, isna, ) import pandas.core.algorithms as algos from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.sparse import SparseDtype import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.api import ( Index, ensure_index, ) from pandas.core.internals.base import ( DataManager, SingleDataManager, interleaved_dtype, ) from pandas.core.internals.blocks import ( Block, NumpyBlock, ensure_block_shape, extend_blocks, get_block_type, new_block, new_block_2d, ) from pandas.core.internals.ops import ( blockwise_all, operate_blockwise, ) ensure_platform_int = algos.ensure_platform_int def _preprocess_slice_or_indexer( slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool ): if isinstance(slice_or_indexer, slice): return ( "slice", slice_or_indexer, libinternals.slice_len(slice_or_indexer, length), ) else: if ( not isinstance(slice_or_indexer, np.ndarray) or slice_or_indexer.dtype.kind != "i" ): dtype = getattr(slice_or_indexer, "dtype", None) raise TypeError(type(slice_or_indexer), dtype) indexer = ensure_platform_int(slice_or_indexer) if not allow_fill: indexer = maybe_convert_indices(indexer, length) return "fancy", indexer, len(indexer)
null
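This row's docstring is null; the helper normalizes either a slice or an integer ndarray into a uniform (kind, indexer, length) triple. A simplified approximation is sketched below: `range.indices` stands in for `libinternals.slice_len` and a manual wrap-and-bounds check stands in for `maybe_convert_indices`, so this is illustrative rather than the pandas implementation.

```python
import numpy as np

def preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
    # Simplified stand-in for the pandas helper above.
    if isinstance(slice_or_indexer, slice):
        # Length of the slice over an axis of the given length.
        n = len(range(*slice_or_indexer.indices(length)))
        return "slice", slice_or_indexer, n
    indexer = np.asarray(slice_or_indexer, dtype=np.intp)
    if not allow_fill:
        # Wrap negative positions the way positional indexing would,
        # then reject anything still out of bounds.
        indexer = np.where(indexer < 0, indexer + length, indexer)
        if ((indexer < 0) | (indexer >= length)).any():
            raise IndexError("indices are out-of-bounds")
    return "fancy", indexer, len(indexer)

print(preprocess_slice_or_indexer(slice(2, 8, 2), 10, False))       # ('slice', slice(2, 8, 2), 3)
print(preprocess_slice_or_indexer(np.array([0, -1, 3]), 5, False))  # ('fancy', array([0, 4, 3]), 3)
```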
173,083
from __future__ import annotations from typing import ( Literal, TypeVar, final, ) import numpy as np from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, Shape, ) from pandas.errors import AbstractMethodError from pandas.core.dtypes.cast import ( find_common_type, np_can_hold_element, ) from pandas.core.base import PandasObject from pandas.core.indexes.api import ( Index, default_index, ) DtypeObj = Union[np.dtype, "ExtensionDtype"] def find_common_type(types: list[np.dtype]) -> np.dtype: ... def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: ... def find_common_type(types: list[DtypeObj]) -> DtypeObj: ... def find_common_type(types): """ Find a common data type among the given dtypes. Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type """ if not types: raise ValueError("no types given") first = types[0] # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) # => object if lib.dtypes_all_equal(list(types)): return first # get unique types (dict.fromkeys is used as order-preserving set()) types = list(dict.fromkeys(types).keys()) if any(isinstance(t, ExtensionDtype) for t in types): for t in types: if isinstance(t, ExtensionDtype): res = t._get_common_dtype(types) if res is not None: return res return np.dtype("object") # take lowest unit if all(is_datetime64_dtype(t) for t in types): return np.dtype("datetime64[ns]") if all(is_timedelta64_dtype(t) for t in types): return np.dtype("timedelta64[ns]") # don't mix bool / int or float or complex # this is different from numpy, which casts bool with float/int as int has_bools = any(is_bool_dtype(t) for t in types) if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): return np.dtype("object") return np.find_common_type(types, []) The provided code snippet includes necessary dependencies for implementing the `interleaved_dtype` function. Write a Python function `def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None` to solve the following problem: Find the common dtype for `blocks`. Parameters ---------- blocks : List[DtypeObj] Returns ------- dtype : np.dtype, ExtensionDtype, or None None is returned when `blocks` is empty. Here is the function: def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None: """ Find the common dtype for `blocks`. Parameters ---------- blocks : List[DtypeObj] Returns ------- dtype : np.dtype, ExtensionDtype, or None None is returned when `blocks` is empty. """ if not len(dtypes): return None return find_common_type(dtypes)
Find the common dtype for `blocks`.

Parameters
----------
blocks : List[DtypeObj]

Returns
-------
dtype : np.dtype, ExtensionDtype, or None
    None is returned when `blocks` is empty.
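A numpy-only approximation of the same behavior, for illustration: `np.result_type` promotes dtypes, which differs from the pandas helper for bool and extension dtypes (pandas falls back to object when bool is mixed with numeric), so treat this as a sketch of the empty-input convention rather than the real `find_common_type`.

```python
import numpy as np

def interleaved_dtype_numpy(dtypes):
    # Empty input -> None, matching the pandas helper; otherwise promote
    # with np.result_type (which may differ from pandas for bool/EA dtypes).
    if not len(dtypes):
        return None
    return np.result_type(*dtypes)

print(interleaved_dtype_numpy([]))                                        # None
print(interleaved_dtype_numpy([np.dtype("int64"), np.dtype("float32")]))  # float64
```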
173,084
from __future__ import annotations from functools import wraps import re from typing import ( TYPE_CHECKING, Any, Callable, Iterable, Sequence, cast, final, ) import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( internals as libinternals, lib, writers, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._libs.missing import NA from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, F, FillnaOptions, IgnoreRaise, QuantileInterpolation, Shape, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.astype import ( astype_array_safe, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, find_result_type, maybe_downcast_to_dtype, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_1d_only_ea_obj, is_dtype_equal, is_interval_dtype, is_list_like, is_sparse, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, PandasDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCPandasArray, ABCSeries, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import missing import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( extract_bool_array, putmask_inplace, putmask_without_repeat, setitem_datetimelike_compat, validate_putmask, ) from pandas.core.array_algos.quantile import quantile_compat from pandas.core.array_algos.replace import ( compare_or_regex_search, replace_regex, should_use_regex, ) from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation import expressions from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import check_setitem_lengths class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure Index-ignorant; let the container take care of that """ values: np.ndarray | ExtensionArray ndim: int refs: BlockValuesRefs __init__: Callable __slots__ = () is_numeric = False is_object = False is_extension = False _can_consolidate = True _validate_ndim = True def _consolidate_key(self): return self._can_consolidate, self.dtype.name def _can_hold_na(self) -> bool: """ Can we store NA values in this Block? """ dtype = self.dtype if isinstance(dtype, np.dtype): return dtype.kind not in ["b", "i", "u"] return dtype._can_hold_na def is_bool(self) -> bool: """ We can be bool if a) we are bool dtype or b) object dtype with bool objects. 
""" return self.values.dtype == np.dtype(bool) def external_values(self): return external_values(self.values) def fill_value(self): # Used in reindex_indexer return na_value_for_dtype(self.dtype, compat=False) def _standardize_fill_value(self, value): # if we are passed a scalar None, convert it here if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): value = self.fill_value return value def mgr_locs(self) -> BlockPlacement: return self._mgr_locs def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: self._mgr_locs = new_mgr_locs def make_block( self, values, placement=None, refs: BlockValuesRefs | None = None ) -> Block: """ Create a new block, with type inference propagate any values that are not specified """ if placement is None: placement = self._mgr_locs if self.is_extension: values = ensure_block_shape(values, ndim=self.ndim) # TODO: perf by not going through new_block # We assume maybe_coerce_values has already been called return new_block(values, placement=placement, ndim=self.ndim, refs=refs) def make_block_same_class( self, values, placement: BlockPlacement | None = None, refs: BlockValuesRefs | None = None, ) -> Block: """Wrap given values in a block of same type as self.""" # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet # relied on it, as of 2.0 the caller is responsible for this. if placement is None: placement = self._mgr_locs # We assume maybe_coerce_values has already been called return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) def __repr__(self) -> str: # don't want to print out all of the items here name = type(self).__name__ if self.ndim == 1: result = f"{name}: {len(self)} dtype: {self.dtype}" else: shape = " x ".join([str(s) for s in self.shape]) result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" return result def __len__(self) -> int: return len(self.values) def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ # Note: the only place where we are called with ndarray[intp] # is from internals.concat, and we can verify that never happens # with 1-column blocks, i.e. never for ExtensionBlock. new_mgr_locs = self._mgr_locs[slicer] new_values = self._slice(slicer) refs = self.refs if isinstance(slicer, slice) else None return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) def getitem_block_columns( self, slicer: slice, new_mgr_locs: BlockPlacement ) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ new_values = self._slice(slicer) if new_values.ndim != self.values.ndim: raise ValueError("Only same dim slicing is allowed") return type(self)(new_values, new_mgr_locs, self.ndim, refs=self.refs) def _can_hold_element(self, element: Any) -> bool: """require the same dtype as ourselves""" element = extract_array(element, extract_numpy=True) return can_hold_element(self.values, element) def should_store(self, value: ArrayLike) -> bool: """ Should we set self.values[indexer] = value inplace or do we need to cast? 
Parameters ---------- value : np.ndarray or ExtensionArray Returns ------- bool """ # faster equivalent to is_dtype_equal(value.dtype, self.dtype) try: return value.dtype == self.dtype except TypeError: return False # --------------------------------------------------------------------- # Apply/Reduce and Helpers def apply(self, func, **kwargs) -> list[Block]: """ apply the function to my values; return a block if we are not one """ result = func(self.values, **kwargs) return self._split_op_result(result) def reduce(self, func) -> list[Block]: # We will apply the function and reshape the result into a single-row # Block with the same mgr_locs; squeezing will be done at a higher level assert self.ndim == 2 result = func(self.values) if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: res_values = result.reshape(-1, 1) nb = self.make_block(res_values) return [nb] def _split_op_result(self, result: ArrayLike) -> list[Block]: # See also: split_and_operate if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): # TODO(EA2D): unnecessary with 2D EAs # if we get a 2D ExtensionArray, we need to split it into 1D pieces nbs = [] for i, loc in enumerate(self._mgr_locs): if not is_1d_only_ea_obj(result): vals = result[i : i + 1] else: vals = result[i] block = self.make_block(values=vals, placement=loc) nbs.append(block) return nbs nb = self.make_block(result) return [nb] def _split(self) -> list[Block]: """ Split a block into a list of single-column blocks. """ assert self.ndim == 2 new_blocks = [] for i, ref_loc in enumerate(self._mgr_locs): vals = self.values[slice(i, i + 1)] bp = BlockPlacement(ref_loc) nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) new_blocks.append(nb) return new_blocks def split_and_operate(self, func, *args, **kwargs) -> list[Block]: """ Split the block and apply func column-by-column. Parameters ---------- func : Block method *args **kwargs Returns ------- List[Block] """ assert self.ndim == 2 and self.shape[0] != 1 res_blocks = [] for nb in self._split(): rbs = func(nb, *args, **kwargs) res_blocks.extend(rbs) return res_blocks # --------------------------------------------------------------------- # Up/Down-casting def coerce_to_target_dtype(self, other) -> Block: """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ new_dtype = find_result_type(self.values, other) return self.astype(new_dtype, copy=False) def _maybe_downcast( self, blocks: list[Block], downcast=None, using_cow: bool = False ) -> list[Block]: if downcast is False: return blocks if self.dtype == _dtype_obj: # TODO: does it matter that self.dtype might not match blocks[i].dtype? # GH#44241 We downcast regardless of the argument; # respecting 'downcast=None' may be worthwhile at some point, # but ATM it breaks too much existing code. # split and convert the blocks return extend_blocks( [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] ) if downcast is None: return blocks return extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: """ downcast specialized to 2D case post-validation. Refactored to allow use of maybe_split. 
""" new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) refs = self.refs if using_cow and new_values is self.values else None return [self.make_block(new_values, refs=refs)] def convert( self, *, copy: bool = True, using_cow: bool = False, ) -> list[Block]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ if not copy and using_cow: return [self.copy(deep=False)] return [self.copy()] if copy else [self] # --------------------------------------------------------------------- # Array-Like Methods def dtype(self) -> DtypeObj: return self.values.dtype def astype( self, dtype: DtypeObj, copy: bool = False, errors: IgnoreRaise = "raise", using_cow: bool = False, ) -> Block: """ Coerce to the new dtype. Parameters ---------- dtype : np.dtype or ExtensionDtype copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object using_cow: bool, default False Signaling if copy on write copy logic is used. Returns ------- Block """ values = self.values new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) new_values = maybe_coerce_values(new_values) refs = None if using_cow and astype_is_view(values.dtype, new_values.dtype): refs = self.refs newb = self.make_block(new_values, refs=refs) if newb.shape != self.shape: raise TypeError( f"cannot set astype for copy = [{copy}] for dtype " f"({self.dtype.name} [{self.shape}]) to different shape " f"({newb.dtype.name} [{newb.shape}])" ) return newb def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block: """convert to our native types format""" result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) return self.make_block(result) def copy(self, deep: bool = True) -> Block: """copy constructor""" values = self.values refs: BlockValuesRefs | None if deep: values = values.copy() refs = None else: refs = self.refs return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) # --------------------------------------------------------------------- # Replace def replace( self, to_replace, value, inplace: bool = False, # mask may be pre-computed if we're called from replace_list mask: npt.NDArray[np.bool_] | None = None, using_cow: bool = False, ) -> list[Block]: """ replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. """ # Note: the checks we do in NDFrame.replace ensure we never get # here with listlike to_replace or value, as those cases # go through replace_list values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and (self.refs.has_reference() or not inplace): blk = self.copy() elif using_cow: blk = self.copy(deep=False) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=to_replace, value=value, inplace=True) return [blk] if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that # replacing it is a no-op. # Note: If to_replace were a list, NDFrame.replace would call # replace_list instead of replace. 
if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] if mask is None: mask = missing.mask_missing(values, to_replace) if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly # bc _can_hold_element is incorrect. if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] elif self._can_hold_element(value): # TODO(CoW): Maybe split here as well into columns where mask has True # and rest? if using_cow: if inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self.copy() else: blk = self if inplace else self.copy() putmask_inplace(blk.values, mask, value) if not (self.is_object and value is None): # if the user *explicitly* gave None, we keep None, otherwise # may downcast to NaN blocks = blk.convert(copy=False, using_cow=using_cow) else: blocks = [blk] return blocks elif self.ndim == 1 or self.shape[0] == 1: if value is None or value is NA: blk = self.astype(np.dtype(object)) else: blk = self.coerce_to_target_dtype(value) return blk.replace( to_replace=to_replace, value=value, inplace=True, mask=mask, ) else: # split so that we only upcast where necessary blocks = [] for i, nb in enumerate(self._split()): blocks.extend( type(self).replace( nb, to_replace=to_replace, value=value, inplace=True, mask=mask[i : i + 1], using_cow=using_cow, ) ) return blocks def _replace_regex( self, to_replace, value, inplace: bool = False, mask=None, using_cow: bool = False, ) -> list[Block]: """ Replace elements by the given value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. inplace : bool, default False Perform inplace modification. mask : array-like of bool, optional True indicate corresponding element is ignored. using_cow: bool, default False Specifying if copy on write is enabled. Returns ------- List[Block] """ if not self._can_hold_element(to_replace): # i.e. only ObjectBlock, but could in principle include a # String ExtensionBlock if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] rx = re.compile(to_replace) if using_cow: if inplace and not self.refs.has_reference(): refs = self.refs new_values = self.values else: refs = None new_values = self.values.copy() else: refs = None new_values = self.values if inplace else self.values.copy() replace_regex(new_values, rx, value, mask) block = self.make_block(new_values, refs=refs) return block.convert(copy=False, using_cow=using_cow) def replace_list( self, src_list: Iterable[Any], dest_list: Sequence[Any], inplace: bool = False, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ See BlockManager.replace_list docstring. 
""" values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=src_list, value=dest_list, inplace=True) return [blk] # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) ] if not len(pairs): if using_cow: return [self.copy(deep=False)] # shortcut, nothing to replace return [self] if inplace else [self.copy()] src_len = len(pairs) - 1 if is_string_dtype(values.dtype): # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations na_mask = ~isna(values) masks: Iterable[npt.NDArray[np.bool_]] = ( extract_bool_array( cast( ArrayLike, compare_or_regex_search( values, s[0], regex=regex, mask=na_mask ), ) ) for s in pairs ) else: # GH#38086 faster if we know we dont need to check for regex masks = (missing.mask_missing(values, s[0]) for s in pairs) # Materialize if inplace = True, since the masks can change # as we replace if inplace: masks = list(masks) if using_cow and inplace: # Don't set up refs here, otherwise we will think that we have # references when we check again later rb = [self] else: rb = [self if inplace else self.copy()] for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): convert = i == src_len # only convert once at the end new_rb: list[Block] = [] # GH-39338: _replace_coerce can split a block into # single-column blocks, so track the index so we know # where to index into the mask for blk_num, blk in enumerate(rb): if len(rb) == 1: m = mask else: mib = mask assert not isinstance(mib, bool) m = mib[blk_num : blk_num + 1] # error: Argument "mask" to "_replace_coerce" of "Block" has # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; # expected "ndarray[Any, dtype[bool_]]" result = blk._replace_coerce( to_replace=src, value=dest, mask=m, inplace=inplace, regex=regex, using_cow=using_cow, ) if convert and blk.is_object and not all(x is None for x in dest_list): # GH#44498 avoid unwanted cast-back result = extend_blocks( [ b.convert(copy=True and not using_cow, using_cow=using_cow) for b in result ] ) new_rb.extend(result) rb = new_rb return rb def _replace_coerce( self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool = True, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. 
Returns ------- List[Block] """ if should_use_regex(regex, to_replace): return self._replace_regex( to_replace, value, inplace=inplace, mask=mask, ) else: if value is None: # gh-45601, gh-45836, gh-46634 if mask.any(): has_ref = self.refs.has_reference() nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) if (nb is self or using_cow) and not inplace: nb = nb.copy() elif inplace and has_ref and nb.refs.has_reference(): # no copy in astype and we had refs before nb = nb.copy() putmask_inplace(nb.values, mask, value) return [nb] if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] return self.replace( to_replace=to_replace, value=value, inplace=inplace, mask=mask, using_cow=using_cow, ) # --------------------------------------------------------------------- # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock # but not ExtensionBlock def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: """ For compatibility with 1D-only ExtensionArrays. """ return arg def _unwrap_setitem_indexer(self, indexer): """ For compatibility with 1D-only ExtensionArrays. """ return indexer # NB: this cannot be made cache_readonly because in mgr.set_values we pin # new .values that can have different shape GH#42631 def shape(self) -> Shape: return self.values.shape def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: # In the case where we have a tuple[slice, int], the slice will always # be slice(None) # Note: only reached with self.ndim == 2 # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type # "Union[int, integer[Any]]" return self.values[i] # type: ignore[index] def _slice( self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] ) -> ArrayLike: """return a slice of my values""" return self.values[slicer] def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: """ Modify block values in-place with new item value. If copy=True, first copy the underlying values in place before modifying (for Copy-on-Write). Notes ----- `set_inplace` never creates a new array or new Block, whereas `setitem` _may_ create a new array and always creates a new Block. Caller is responsible for checking values.dtype == self.dtype. """ if copy: self.values = self.values.copy() self.values[locs] = values def take_nd( self, indexer: npt.NDArray[np.intp], axis: AxisInt, new_mgr_locs: BlockPlacement | None = None, fill_value=lib.no_default, ) -> Block: """ Take values according to indexer and return them as a block. """ values = self.values if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: allow_fill = True # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype new_values = algos.take_nd( values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value ) # Called from three places in managers, all of which satisfy # these assertions if isinstance(self, ExtensionBlock): # NB: in this case, the 'axis' kwarg will be ignored in the # algos.take_nd call above. 
assert not (self.ndim == 1 and new_mgr_locs is None) assert not (axis == 0 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self._mgr_locs if not is_dtype_equal(new_values.dtype, self.dtype): return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def _unstack( self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_], ): """ Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack new_placement : np.ndarray[np.intp] allow_fill : bool needs_masking : np.ndarray[bool] Returns ------- blocks : list of Block New blocks of unstacked values. mask : array-like of bool The mask of columns of `blocks` we should keep. """ new_values, mask = unstacker.get_new_values( self.values.T, fill_value=fill_value ) mask = mask.any(0) # TODO: in all tests we have mask.all(); can we rely on that? # Note: these next two lines ensure that # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) # which the calling function needs in order to pass verify_integrity=False # to the BlockManager constructor new_values = new_values.T[mask] new_placement = new_placement[mask] bp = BlockPlacement(new_placement) blocks = [new_block_2d(new_values, placement=bp)] return blocks, mask # --------------------------------------------------------------------- def setitem(self, indexer, value, using_cow: bool = False) -> Block: """ Attempt self.values[indexer] = value, possibly creating a new array. Parameters ---------- indexer : tuple, list-like, array-like, slice, int The subset of self.values to set value : object The value being set using_cow: bool, default False Signaling if CoW is used. Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ value = self._standardize_fill_value(value) values = cast(np.ndarray, self.values) if self.ndim == 2: values = values.T # length checking check_setitem_lengths(indexer, value, values) value = extract_array(value, extract_numpy=True) try: casted = np_can_hold_element(values.dtype, value) except LossySetitemError: # current dtype cannot store value, coerce to common dtype nb = self.coerce_to_target_dtype(value) return nb.setitem(indexer, value) else: if self.dtype == _dtype_obj: # TODO: avoid having to construct values[indexer] vi = values[indexer] if lib.is_list_like(vi): # checking lib.is_scalar here fails on # test_iloc_setitem_custom_object casted = setitem_datetimelike_compat(values, len(vi), casted) if using_cow and self.refs.has_reference(): values = values.copy() self = self.make_block_same_class( values.T if values.ndim == 2 else values ) values[indexer] = casted return self def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: """ putmask the data to the block; it is possible that we may create a new dtype of block Return the resulting block(s). 
Parameters ---------- mask : np.ndarray[bool], SparseArray[bool], or BooleanArray new : a ndarray/object using_cow: bool, default False Returns ------- List[Block] """ orig_mask = mask values = cast(np.ndarray, self.values) mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) if new is lib.no_default: new = self.fill_value new = self._standardize_fill_value(new) new = extract_array(new, extract_numpy=True) if noop: if using_cow: return [self.copy(deep=False)] return [self] try: casted = np_can_hold_element(values.dtype, new) if using_cow and self.refs.has_reference(): # Do this here to avoid copying twice values = values.copy() self = self.make_block_same_class(values) putmask_without_repeat(values.T, mask, casted) if using_cow: return [self.copy(deep=False)] return [self] except LossySetitemError: if self.ndim == 1 or self.shape[0] == 1: # no need to split columns if not is_list_like(new): # using just new[indexer] can't save us the need to cast return self.coerce_to_target_dtype(new).putmask(mask, new) else: indexer = mask.nonzero()[0] nb = self.setitem(indexer, new[indexer], using_cow=using_cow) return [nb] else: is_array = isinstance(new, np.ndarray) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): n = new if is_array: # we have a different value per-column n = new[:, i : i + 1] submask = orig_mask[:, i : i + 1] rbs = nb.putmask(submask, n, using_cow=using_cow) res_blocks.extend(rbs) return res_blocks def where( self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False ) -> list[Block]: """ evaluate the block; return result block(s) from the result Parameters ---------- other : a ndarray/object cond : np.ndarray[bool], SparseArray[bool], or BooleanArray _downcast : str or None, default "infer" Private because we only specify it when calling from fillna. Returns ------- List[Block] """ assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) transpose = self.ndim == 2 cond = extract_bool_array(cond) # EABlocks override where values = cast(np.ndarray, self.values) orig_other = other if transpose: values = values.T icond, noop = validate_putmask(values, ~cond) if noop: # GH-39595: Always return a copy; short-circuit up/downcasting if using_cow: return [self.copy(deep=False)] return [self.copy()] if other is lib.no_default: other = self.fill_value other = self._standardize_fill_value(other) try: # try/except here is equivalent to a self._can_hold_element check, # but this gets us back 'casted' which we will re-use below; # without using 'casted', expressions.where may do unwanted upcasts. casted = np_can_hold_element(values.dtype, other) except (ValueError, TypeError, LossySetitemError): # we cannot coerce, return a compat dtype if self.ndim == 1 or self.shape[0] == 1: # no need to split columns block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, using_cow=using_cow) return self._maybe_downcast( blocks, downcast=_downcast, using_cow=using_cow ) else: # since _maybe_downcast would split blocks anyway, we # can avoid some potential upcast/downcast by splitting # on the front end. 
is_array = isinstance(other, (np.ndarray, ExtensionArray)) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): oth = other if is_array: # we have a different value per-column oth = other[:, i : i + 1] submask = cond[:, i : i + 1] rbs = nb.where( oth, submask, _downcast=_downcast, using_cow=using_cow ) res_blocks.extend(rbs) return res_blocks else: other = casted alt = setitem_datetimelike_compat(values, icond.sum(), other) if alt is not other: if is_list_like(other) and len(other) < len(values): # call np.where with other to get the appropriate ValueError np.where(~icond, values, other) raise NotImplementedError( "This should not be reached; call to np.where above is " "expected to raise ValueError. Please report a bug at " "github.com/pandas-dev/pandas" ) result = values.copy() np.putmask(result, icond, alt) else: # By the time we get here, we should have all Series/Index # args extracted to ndarray if ( is_list_like(other) and not isinstance(other, np.ndarray) and len(other) == self.shape[-1] ): # If we don't do this broadcasting here, then expressions.where # will broadcast a 1D other to be row-like instead of # column-like. other = np.array(other).reshape(values.shape) # If lengths don't match (or len(other)==1), we will raise # inside expressions.where, see test_series_where # Note: expressions.where may upcast. result = expressions.where(~icond, values, other) # The np_can_hold_element check _should_ ensure that we always # have result.dtype == self.dtype here. if transpose: result = result.T return [self.make_block(result)] def fillna( self, value, limit: int | None = None, inplace: bool = False, downcast=None, using_cow: bool = False, ) -> list[Block]: """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """ # Caller is responsible for validating limit; if int it is strictly positive inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # can short-circuit the isna call noop = True else: mask = isna(self.values) mask, noop = validate_putmask(self.values, mask) if noop: # we can't process the value, but nothing to do if inplace: if using_cow: return [self.copy(deep=False)] # Arbitrarily imposing the convention that we ignore downcast # on no-op when inplace=True return [self] else: # GH#45423 consistent downcasting on no-ops. nb = self.copy(deep=not using_cow) nbs = nb._maybe_downcast([nb], downcast=downcast, using_cow=using_cow) return nbs if limit is not None: mask[mask.cumsum(self.ndim - 1) > limit] = False if inplace: nbs = self.putmask(mask.T, value, using_cow=using_cow) else: # without _downcast, we would break # test_fillna_dtype_conversion_equiv_replace nbs = self.where(value, ~mask.T, _downcast=False) # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) # makes a difference bc blk may have object dtype, which has # different behavior in _maybe_downcast. 
return extend_blocks( [ blk._maybe_downcast([blk], downcast=downcast, using_cow=using_cow) for blk in nbs ] ) def interpolate( self, *, method: FillnaOptions = "pad", axis: AxisInt = 0, index: Index | None = None, inplace: bool = False, limit: int | None = None, limit_direction: str = "forward", limit_area: str | None = None, fill_value: Any | None = None, downcast: str | None = None, using_cow: bool = False, **kwargs, ) -> list[Block]: inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # If there are no NAs, then interpolate is a no-op if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] try: m = missing.clean_fill_method(method) except ValueError: m = None if m is None and self.dtype.kind != "f": # only deal with floats # bc we already checked that can_hold_na, we don't have int dtype here # test_interp_basic checks that we make a copy here if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] if self.is_object and self.ndim == 2 and self.shape[0] != 1 and axis == 0: # split improves performance in ndarray.copy() return self.split_and_operate( type(self).interpolate, method=method, axis=axis, index=index, inplace=inplace, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, downcast=downcast, **kwargs, ) refs = None if inplace: if using_cow and self.refs.has_reference(): data = self.values.copy() else: data = self.values refs = self.refs else: data = self.values.copy() data = cast(np.ndarray, data) # bc overridden by ExtensionBlock missing.interpolate_array_2d( data, method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, **kwargs, ) nb = self.make_block_same_class(data, refs=refs) return nb._maybe_downcast([nb], downcast, using_cow) def diff(self, n: int, axis: AxisInt = 1) -> list[Block]: """return block for the diff of the values""" # only reached with ndim == 2 and axis == 1 new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] def shift( self, periods: int, axis: AxisInt = 0, fill_value: Any = None ) -> list[Block]: """shift the block by periods, possibly upcast""" # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also # Note: periods is never 0 here, as that is handled at the top of # NDFrame.shift. If that ever changes, we can do a check for periods=0 # and possibly avoid coercing. 
if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj: # with object dtype there is nothing to promote, and the user can # pass pretty much any weird fill_value they like # see test_shift_object_non_scalar_fill raise ValueError("fill_value must be a scalar") fill_value = self._standardize_fill_value(fill_value) try: # error: Argument 1 to "np_can_hold_element" has incompatible type # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" casted = np_can_hold_element( self.dtype, fill_value # type: ignore[arg-type] ) except LossySetitemError: nb = self.coerce_to_target_dtype(fill_value) return nb.shift(periods, axis=axis, fill_value=fill_value) else: values = cast(np.ndarray, self.values) new_values = shift(values, periods, axis, casted) return [self.make_block(new_values)] def quantile( self, qs: Index, # with dtype float64 interpolation: QuantileInterpolation = "linear", axis: AxisInt = 0, ) -> Block: """ compute the quantiles of the Parameters ---------- qs : Index The quantiles to be computed in float64. interpolation : str, default 'linear' Type of interpolation. axis : int, default 0 Axis to compute. Returns ------- Block """ # We should always have ndim == 2 because Series dispatches to DataFrame assert self.ndim == 2 assert axis == 1 # only ever called this way assert is_list_like(qs) # caller is responsible for this result = quantile_compat(self.values, np.asarray(qs._values), interpolation) # ensure_block_shape needed for cases where we start with EA and result # is ndarray, e.g. IntegerArray, SparseArray result = ensure_block_shape(result, ndim=2) return new_block_2d(result, placement=self._mgr_locs) def round(self, decimals: int, using_cow: bool = False) -> Block: """ Rounds the values. If the block is not of an integer or float dtype, nothing happens. This is consistent with DataFrame.round behavivor. (Note: Series.round would raise) Parameters ---------- decimals: int, Number of decimal places to round to. Caller is responsible for validating this using_cow: bool, Whether Copy on Write is enabled right now """ if not self.is_numeric or self.is_bool: return self.copy(deep=not using_cow) refs = None # TODO: round only defined on BaseMaskedArray # Series also does this, so would need to fix both places # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]" # has no attribute "round" values = self.values.round(decimals) # type: ignore[union-attr] if values is self.values: refs = self.refs if not using_cow: # Normally would need to do this before, but # numpy only returns same array when round operation # is no-op # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636 values = values.copy() return self.make_block_same_class(values, refs=refs) # --------------------------------------------------------------------- # Abstract Methods Overridden By EABackedBlock and NumpyBlock def delete(self, loc) -> list[Block]: """Deletes the locs from the block. We split the block to avoid copying the underlying data. We create new blocks for every connected segment of the initial block that is not deleted. The new blocks point to the initial array. 
""" if not is_list_like(loc): loc = [loc] if self.ndim == 1: values = cast(np.ndarray, self.values) values = np.delete(values, loc) mgr_locs = self._mgr_locs.delete(loc) return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] if np.max(loc) >= self.values.shape[0]: raise IndexError # Add one out-of-bounds indexer as maximum to collect # all columns after our last indexer if any loc = np.concatenate([loc, [self.values.shape[0]]]) mgr_locs_arr = self._mgr_locs.as_array new_blocks: list[Block] = [] previous_loc = -1 # TODO(CoW): This is tricky, if parent block goes out of scope # all split blocks are referencing each other even though they # don't share data refs = self.refs if self.refs.has_reference() else None for idx in loc: if idx == previous_loc + 1: # There is no column between current and last idx pass else: # No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, slice]" values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa locs = mgr_locs_arr[previous_loc + 1 : idx] nb = type(self)( values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs ) new_blocks.append(nb) previous_loc = idx return new_blocks def is_view(self) -> bool: """return a boolean if I am possibly a view""" raise AbstractMethodError(self) def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. """ raise AbstractMethodError(self) def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ raise AbstractMethodError(self) def values_for_json(self) -> np.ndarray: raise AbstractMethodError(self) def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ... def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... F = TypeVar("F", bound=FuncType) The provided code snippet includes necessary dependencies for implementing the `maybe_split` function. Write a Python function `def maybe_split(meth: F) -> F` to solve the following problem: If we have a multi-column block, split and operate block-wise. Otherwise use the original method. Here is the function: def maybe_split(meth: F) -> F: """ If we have a multi-column block, split and operate block-wise. Otherwise use the original method. """ @wraps(meth) def newfunc(self, *args, **kwargs) -> list[Block]: if self.ndim == 1 or self.shape[0] == 1: return meth(self, *args, **kwargs) else: # Split and operate column-by-column return self.split_and_operate(meth, *args, **kwargs) return cast(F, newfunc)
If we have a multi-column block, split and operate block-wise. Otherwise use the original method.
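To see the decorator pattern in action without pulling in pandas internals, here is a minimal standalone sketch. ToyBlock and its _split/split_and_operate methods are illustrative stand-ins for the small part of the Block interface that maybe_split relies on (ndim, shape, split_and_operate); they are not pandas API. A two-row block is split and the decorated method runs once per single-row block.

from __future__ import annotations

from functools import wraps
from typing import Any, Callable, TypeVar, cast

import numpy as np

F = TypeVar("F", bound=Callable[..., Any])


def maybe_split(meth: F) -> F:
    # same logic as the function above: single-column blocks call meth
    # directly, multi-column blocks are split and meth runs column-by-column
    @wraps(meth)
    def newfunc(self, *args: Any, **kwargs: Any) -> list:
        if self.ndim == 1 or self.shape[0] == 1:
            return meth(self, *args, **kwargs)
        return self.split_and_operate(meth, *args, **kwargs)

    return cast(F, newfunc)


class ToyBlock:
    # hypothetical stand-in exposing only what maybe_split needs
    def __init__(self, values: np.ndarray) -> None:
        self.values = values
        self.ndim = values.ndim
        self.shape = values.shape

    def _split(self) -> list[ToyBlock]:
        # one single-row block per "column" (blocks store columns as rows)
        return [ToyBlock(self.values[i : i + 1]) for i in range(self.shape[0])]

    def split_and_operate(self, func: Callable, *args: Any, **kwargs: Any) -> list[ToyBlock]:
        out: list[ToyBlock] = []
        for nb in self._split():
            out.extend(func(nb, *args, **kwargs))
        return out

    @maybe_split
    def negate(self) -> list[ToyBlock]:
        return [ToyBlock(-self.values)]


blk = ToyBlock(np.arange(6).reshape(2, 3))
pieces = blk.negate()  # decorated call splits into two single-row blocks
print([p.values.tolist() for p in pieces])  # [[[0, -1, -2]], [[-3, -4, -5]]]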
173,085
from __future__ import annotations from functools import wraps import re from typing import ( TYPE_CHECKING, Any, Callable, Iterable, Sequence, cast, final, ) import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( internals as libinternals, lib, writers, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._libs.missing import NA from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, F, FillnaOptions, IgnoreRaise, QuantileInterpolation, Shape, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.astype import ( astype_array_safe, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, find_result_type, maybe_downcast_to_dtype, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_1d_only_ea_obj, is_dtype_equal, is_interval_dtype, is_list_like, is_sparse, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, PandasDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCPandasArray, ABCSeries, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import missing import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( extract_bool_array, putmask_inplace, putmask_without_repeat, setitem_datetimelike_compat, validate_putmask, ) from pandas.core.array_algos.quantile import quantile_compat from pandas.core.array_algos.replace import ( compare_or_regex_search, replace_regex, should_use_regex, ) from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation import expressions from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import check_setitem_lengths The provided code snippet includes necessary dependencies for implementing the `_catch_deprecated_value_error` function. Write a Python function `def _catch_deprecated_value_error(err: Exception) -> None` to solve the following problem: We catch ValueError for now, but only a specific one raised by DatetimeArray which will no longer be raised in version.2.0. Here is the function: def _catch_deprecated_value_error(err: Exception) -> None: """ We catch ValueError for now, but only a specific one raised by DatetimeArray which will no longer be raised in version.2.0. """ if isinstance(err, ValueError): if isinstance(err, IncompatibleFrequency): pass elif "'value.closed' is" in str(err): # IntervalDtype mismatched 'closed' pass
We catch ValueError for now, but only a specific one raised by DatetimeArray which will no longer be raised in version 2.0.
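A small self-contained sketch of how a setitem-style caller might use this helper. The helper body is copied from the row above so the snippet runs on its own; the demo() wrapper, the hand-constructed error messages, and the "coerce and retry" message are illustrative assumptions, not the exact pandas call site. The two deprecated cases are tolerated and the caller takes a fallback path, while any other ValueError propagates.

from pandas._libs.tslibs import IncompatibleFrequency


def _catch_deprecated_value_error(err: Exception) -> None:
    # copy of the helper above, inlined so this sketch runs on its own
    if isinstance(err, ValueError):
        if isinstance(err, IncompatibleFrequency):
            pass
        elif "'value.closed' is" in str(err):
            # IntervalDtype mismatched 'closed'
            pass


def demo(err: ValueError) -> str:
    # stand-in for `values[indexer] = value` failing inside a Block.setitem-like path
    try:
        raise err
    except ValueError as exc:
        _catch_deprecated_value_error(exc)
        if isinstance(exc, IncompatibleFrequency) or "'value.closed' is" in str(exc):
            return "deprecated case: coerce block to a compatible dtype and retry"
        raise


print(demo(IncompatibleFrequency("mismatched Period frequency")))
print(demo(ValueError("'value.closed' is 'left', expected 'right'")))
# demo(ValueError("unrelated message")) would re-raise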
173,086
from __future__ import annotations from functools import wraps import re from typing import ( TYPE_CHECKING, Any, Callable, Iterable, Sequence, cast, final, ) import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( internals as libinternals, lib, writers, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._libs.missing import NA from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, F, FillnaOptions, IgnoreRaise, QuantileInterpolation, Shape, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.astype import ( astype_array_safe, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, find_result_type, maybe_downcast_to_dtype, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_1d_only_ea_obj, is_dtype_equal, is_interval_dtype, is_list_like, is_sparse, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, PandasDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCPandasArray, ABCSeries, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import missing import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( extract_bool_array, putmask_inplace, putmask_without_repeat, setitem_datetimelike_compat, validate_putmask, ) from pandas.core.array_algos.quantile import quantile_compat from pandas.core.array_algos.replace import ( compare_or_regex_search, replace_regex, should_use_regex, ) from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation import expressions from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import check_setitem_lengths class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure Index-ignorant; let the container take care of that """ values: np.ndarray | ExtensionArray ndim: int refs: BlockValuesRefs __init__: Callable __slots__ = () is_numeric = False is_object = False is_extension = False _can_consolidate = True _validate_ndim = True def _consolidate_key(self): return self._can_consolidate, self.dtype.name def _can_hold_na(self) -> bool: """ Can we store NA values in this Block? """ dtype = self.dtype if isinstance(dtype, np.dtype): return dtype.kind not in ["b", "i", "u"] return dtype._can_hold_na def is_bool(self) -> bool: """ We can be bool if a) we are bool dtype or b) object dtype with bool objects. 
""" return self.values.dtype == np.dtype(bool) def external_values(self): return external_values(self.values) def fill_value(self): # Used in reindex_indexer return na_value_for_dtype(self.dtype, compat=False) def _standardize_fill_value(self, value): # if we are passed a scalar None, convert it here if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): value = self.fill_value return value def mgr_locs(self) -> BlockPlacement: return self._mgr_locs def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: self._mgr_locs = new_mgr_locs def make_block( self, values, placement=None, refs: BlockValuesRefs | None = None ) -> Block: """ Create a new block, with type inference propagate any values that are not specified """ if placement is None: placement = self._mgr_locs if self.is_extension: values = ensure_block_shape(values, ndim=self.ndim) # TODO: perf by not going through new_block # We assume maybe_coerce_values has already been called return new_block(values, placement=placement, ndim=self.ndim, refs=refs) def make_block_same_class( self, values, placement: BlockPlacement | None = None, refs: BlockValuesRefs | None = None, ) -> Block: """Wrap given values in a block of same type as self.""" # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet # relied on it, as of 2.0 the caller is responsible for this. if placement is None: placement = self._mgr_locs # We assume maybe_coerce_values has already been called return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) def __repr__(self) -> str: # don't want to print out all of the items here name = type(self).__name__ if self.ndim == 1: result = f"{name}: {len(self)} dtype: {self.dtype}" else: shape = " x ".join([str(s) for s in self.shape]) result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" return result def __len__(self) -> int: return len(self.values) def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ # Note: the only place where we are called with ndarray[intp] # is from internals.concat, and we can verify that never happens # with 1-column blocks, i.e. never for ExtensionBlock. new_mgr_locs = self._mgr_locs[slicer] new_values = self._slice(slicer) refs = self.refs if isinstance(slicer, slice) else None return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) def getitem_block_columns( self, slicer: slice, new_mgr_locs: BlockPlacement ) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ new_values = self._slice(slicer) if new_values.ndim != self.values.ndim: raise ValueError("Only same dim slicing is allowed") return type(self)(new_values, new_mgr_locs, self.ndim, refs=self.refs) def _can_hold_element(self, element: Any) -> bool: """require the same dtype as ourselves""" element = extract_array(element, extract_numpy=True) return can_hold_element(self.values, element) def should_store(self, value: ArrayLike) -> bool: """ Should we set self.values[indexer] = value inplace or do we need to cast? 
Parameters ---------- value : np.ndarray or ExtensionArray Returns ------- bool """ # faster equivalent to is_dtype_equal(value.dtype, self.dtype) try: return value.dtype == self.dtype except TypeError: return False # --------------------------------------------------------------------- # Apply/Reduce and Helpers def apply(self, func, **kwargs) -> list[Block]: """ apply the function to my values; return a block if we are not one """ result = func(self.values, **kwargs) return self._split_op_result(result) def reduce(self, func) -> list[Block]: # We will apply the function and reshape the result into a single-row # Block with the same mgr_locs; squeezing will be done at a higher level assert self.ndim == 2 result = func(self.values) if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: res_values = result.reshape(-1, 1) nb = self.make_block(res_values) return [nb] def _split_op_result(self, result: ArrayLike) -> list[Block]: # See also: split_and_operate if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): # TODO(EA2D): unnecessary with 2D EAs # if we get a 2D ExtensionArray, we need to split it into 1D pieces nbs = [] for i, loc in enumerate(self._mgr_locs): if not is_1d_only_ea_obj(result): vals = result[i : i + 1] else: vals = result[i] block = self.make_block(values=vals, placement=loc) nbs.append(block) return nbs nb = self.make_block(result) return [nb] def _split(self) -> list[Block]: """ Split a block into a list of single-column blocks. """ assert self.ndim == 2 new_blocks = [] for i, ref_loc in enumerate(self._mgr_locs): vals = self.values[slice(i, i + 1)] bp = BlockPlacement(ref_loc) nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) new_blocks.append(nb) return new_blocks def split_and_operate(self, func, *args, **kwargs) -> list[Block]: """ Split the block and apply func column-by-column. Parameters ---------- func : Block method *args **kwargs Returns ------- List[Block] """ assert self.ndim == 2 and self.shape[0] != 1 res_blocks = [] for nb in self._split(): rbs = func(nb, *args, **kwargs) res_blocks.extend(rbs) return res_blocks # --------------------------------------------------------------------- # Up/Down-casting def coerce_to_target_dtype(self, other) -> Block: """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ new_dtype = find_result_type(self.values, other) return self.astype(new_dtype, copy=False) def _maybe_downcast( self, blocks: list[Block], downcast=None, using_cow: bool = False ) -> list[Block]: if downcast is False: return blocks if self.dtype == _dtype_obj: # TODO: does it matter that self.dtype might not match blocks[i].dtype? # GH#44241 We downcast regardless of the argument; # respecting 'downcast=None' may be worthwhile at some point, # but ATM it breaks too much existing code. # split and convert the blocks return extend_blocks( [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] ) if downcast is None: return blocks return extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: """ downcast specialized to 2D case post-validation. Refactored to allow use of maybe_split. 
""" new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) refs = self.refs if using_cow and new_values is self.values else None return [self.make_block(new_values, refs=refs)] def convert( self, *, copy: bool = True, using_cow: bool = False, ) -> list[Block]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ if not copy and using_cow: return [self.copy(deep=False)] return [self.copy()] if copy else [self] # --------------------------------------------------------------------- # Array-Like Methods def dtype(self) -> DtypeObj: return self.values.dtype def astype( self, dtype: DtypeObj, copy: bool = False, errors: IgnoreRaise = "raise", using_cow: bool = False, ) -> Block: """ Coerce to the new dtype. Parameters ---------- dtype : np.dtype or ExtensionDtype copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object using_cow: bool, default False Signaling if copy on write copy logic is used. Returns ------- Block """ values = self.values new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) new_values = maybe_coerce_values(new_values) refs = None if using_cow and astype_is_view(values.dtype, new_values.dtype): refs = self.refs newb = self.make_block(new_values, refs=refs) if newb.shape != self.shape: raise TypeError( f"cannot set astype for copy = [{copy}] for dtype " f"({self.dtype.name} [{self.shape}]) to different shape " f"({newb.dtype.name} [{newb.shape}])" ) return newb def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block: """convert to our native types format""" result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) return self.make_block(result) def copy(self, deep: bool = True) -> Block: """copy constructor""" values = self.values refs: BlockValuesRefs | None if deep: values = values.copy() refs = None else: refs = self.refs return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) # --------------------------------------------------------------------- # Replace def replace( self, to_replace, value, inplace: bool = False, # mask may be pre-computed if we're called from replace_list mask: npt.NDArray[np.bool_] | None = None, using_cow: bool = False, ) -> list[Block]: """ replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. """ # Note: the checks we do in NDFrame.replace ensure we never get # here with listlike to_replace or value, as those cases # go through replace_list values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and (self.refs.has_reference() or not inplace): blk = self.copy() elif using_cow: blk = self.copy(deep=False) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=to_replace, value=value, inplace=True) return [blk] if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that # replacing it is a no-op. # Note: If to_replace were a list, NDFrame.replace would call # replace_list instead of replace. 
if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] if mask is None: mask = missing.mask_missing(values, to_replace) if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly # bc _can_hold_element is incorrect. if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] elif self._can_hold_element(value): # TODO(CoW): Maybe split here as well into columns where mask has True # and rest? if using_cow: if inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self.copy() else: blk = self if inplace else self.copy() putmask_inplace(blk.values, mask, value) if not (self.is_object and value is None): # if the user *explicitly* gave None, we keep None, otherwise # may downcast to NaN blocks = blk.convert(copy=False, using_cow=using_cow) else: blocks = [blk] return blocks elif self.ndim == 1 or self.shape[0] == 1: if value is None or value is NA: blk = self.astype(np.dtype(object)) else: blk = self.coerce_to_target_dtype(value) return blk.replace( to_replace=to_replace, value=value, inplace=True, mask=mask, ) else: # split so that we only upcast where necessary blocks = [] for i, nb in enumerate(self._split()): blocks.extend( type(self).replace( nb, to_replace=to_replace, value=value, inplace=True, mask=mask[i : i + 1], using_cow=using_cow, ) ) return blocks def _replace_regex( self, to_replace, value, inplace: bool = False, mask=None, using_cow: bool = False, ) -> list[Block]: """ Replace elements by the given value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. inplace : bool, default False Perform inplace modification. mask : array-like of bool, optional True indicate corresponding element is ignored. using_cow: bool, default False Specifying if copy on write is enabled. Returns ------- List[Block] """ if not self._can_hold_element(to_replace): # i.e. only ObjectBlock, but could in principle include a # String ExtensionBlock if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] rx = re.compile(to_replace) if using_cow: if inplace and not self.refs.has_reference(): refs = self.refs new_values = self.values else: refs = None new_values = self.values.copy() else: refs = None new_values = self.values if inplace else self.values.copy() replace_regex(new_values, rx, value, mask) block = self.make_block(new_values, refs=refs) return block.convert(copy=False, using_cow=using_cow) def replace_list( self, src_list: Iterable[Any], dest_list: Sequence[Any], inplace: bool = False, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ See BlockManager.replace_list docstring. 
""" values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=src_list, value=dest_list, inplace=True) return [blk] # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) ] if not len(pairs): if using_cow: return [self.copy(deep=False)] # shortcut, nothing to replace return [self] if inplace else [self.copy()] src_len = len(pairs) - 1 if is_string_dtype(values.dtype): # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations na_mask = ~isna(values) masks: Iterable[npt.NDArray[np.bool_]] = ( extract_bool_array( cast( ArrayLike, compare_or_regex_search( values, s[0], regex=regex, mask=na_mask ), ) ) for s in pairs ) else: # GH#38086 faster if we know we dont need to check for regex masks = (missing.mask_missing(values, s[0]) for s in pairs) # Materialize if inplace = True, since the masks can change # as we replace if inplace: masks = list(masks) if using_cow and inplace: # Don't set up refs here, otherwise we will think that we have # references when we check again later rb = [self] else: rb = [self if inplace else self.copy()] for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): convert = i == src_len # only convert once at the end new_rb: list[Block] = [] # GH-39338: _replace_coerce can split a block into # single-column blocks, so track the index so we know # where to index into the mask for blk_num, blk in enumerate(rb): if len(rb) == 1: m = mask else: mib = mask assert not isinstance(mib, bool) m = mib[blk_num : blk_num + 1] # error: Argument "mask" to "_replace_coerce" of "Block" has # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; # expected "ndarray[Any, dtype[bool_]]" result = blk._replace_coerce( to_replace=src, value=dest, mask=m, inplace=inplace, regex=regex, using_cow=using_cow, ) if convert and blk.is_object and not all(x is None for x in dest_list): # GH#44498 avoid unwanted cast-back result = extend_blocks( [ b.convert(copy=True and not using_cow, using_cow=using_cow) for b in result ] ) new_rb.extend(result) rb = new_rb return rb def _replace_coerce( self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool = True, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. 
Returns ------- List[Block] """ if should_use_regex(regex, to_replace): return self._replace_regex( to_replace, value, inplace=inplace, mask=mask, ) else: if value is None: # gh-45601, gh-45836, gh-46634 if mask.any(): has_ref = self.refs.has_reference() nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) if (nb is self or using_cow) and not inplace: nb = nb.copy() elif inplace and has_ref and nb.refs.has_reference(): # no copy in astype and we had refs before nb = nb.copy() putmask_inplace(nb.values, mask, value) return [nb] if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] return self.replace( to_replace=to_replace, value=value, inplace=inplace, mask=mask, using_cow=using_cow, ) # --------------------------------------------------------------------- # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock # but not ExtensionBlock def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: """ For compatibility with 1D-only ExtensionArrays. """ return arg def _unwrap_setitem_indexer(self, indexer): """ For compatibility with 1D-only ExtensionArrays. """ return indexer # NB: this cannot be made cache_readonly because in mgr.set_values we pin # new .values that can have different shape GH#42631 def shape(self) -> Shape: return self.values.shape def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: # In the case where we have a tuple[slice, int], the slice will always # be slice(None) # Note: only reached with self.ndim == 2 # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type # "Union[int, integer[Any]]" return self.values[i] # type: ignore[index] def _slice( self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] ) -> ArrayLike: """return a slice of my values""" return self.values[slicer] def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: """ Modify block values in-place with new item value. If copy=True, first copy the underlying values in place before modifying (for Copy-on-Write). Notes ----- `set_inplace` never creates a new array or new Block, whereas `setitem` _may_ create a new array and always creates a new Block. Caller is responsible for checking values.dtype == self.dtype. """ if copy: self.values = self.values.copy() self.values[locs] = values def take_nd( self, indexer: npt.NDArray[np.intp], axis: AxisInt, new_mgr_locs: BlockPlacement | None = None, fill_value=lib.no_default, ) -> Block: """ Take values according to indexer and return them as a block. """ values = self.values if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: allow_fill = True # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype new_values = algos.take_nd( values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value ) # Called from three places in managers, all of which satisfy # these assertions if isinstance(self, ExtensionBlock): # NB: in this case, the 'axis' kwarg will be ignored in the # algos.take_nd call above. 
assert not (self.ndim == 1 and new_mgr_locs is None) assert not (axis == 0 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self._mgr_locs if not is_dtype_equal(new_values.dtype, self.dtype): return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def _unstack( self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_], ): """ Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack new_placement : np.ndarray[np.intp] allow_fill : bool needs_masking : np.ndarray[bool] Returns ------- blocks : list of Block New blocks of unstacked values. mask : array-like of bool The mask of columns of `blocks` we should keep. """ new_values, mask = unstacker.get_new_values( self.values.T, fill_value=fill_value ) mask = mask.any(0) # TODO: in all tests we have mask.all(); can we rely on that? # Note: these next two lines ensure that # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) # which the calling function needs in order to pass verify_integrity=False # to the BlockManager constructor new_values = new_values.T[mask] new_placement = new_placement[mask] bp = BlockPlacement(new_placement) blocks = [new_block_2d(new_values, placement=bp)] return blocks, mask # --------------------------------------------------------------------- def setitem(self, indexer, value, using_cow: bool = False) -> Block: """ Attempt self.values[indexer] = value, possibly creating a new array. Parameters ---------- indexer : tuple, list-like, array-like, slice, int The subset of self.values to set value : object The value being set using_cow: bool, default False Signaling if CoW is used. Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ value = self._standardize_fill_value(value) values = cast(np.ndarray, self.values) if self.ndim == 2: values = values.T # length checking check_setitem_lengths(indexer, value, values) value = extract_array(value, extract_numpy=True) try: casted = np_can_hold_element(values.dtype, value) except LossySetitemError: # current dtype cannot store value, coerce to common dtype nb = self.coerce_to_target_dtype(value) return nb.setitem(indexer, value) else: if self.dtype == _dtype_obj: # TODO: avoid having to construct values[indexer] vi = values[indexer] if lib.is_list_like(vi): # checking lib.is_scalar here fails on # test_iloc_setitem_custom_object casted = setitem_datetimelike_compat(values, len(vi), casted) if using_cow and self.refs.has_reference(): values = values.copy() self = self.make_block_same_class( values.T if values.ndim == 2 else values ) values[indexer] = casted return self def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: """ putmask the data to the block; it is possible that we may create a new dtype of block Return the resulting block(s). 
Parameters ---------- mask : np.ndarray[bool], SparseArray[bool], or BooleanArray new : a ndarray/object using_cow: bool, default False Returns ------- List[Block] """ orig_mask = mask values = cast(np.ndarray, self.values) mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) if new is lib.no_default: new = self.fill_value new = self._standardize_fill_value(new) new = extract_array(new, extract_numpy=True) if noop: if using_cow: return [self.copy(deep=False)] return [self] try: casted = np_can_hold_element(values.dtype, new) if using_cow and self.refs.has_reference(): # Do this here to avoid copying twice values = values.copy() self = self.make_block_same_class(values) putmask_without_repeat(values.T, mask, casted) if using_cow: return [self.copy(deep=False)] return [self] except LossySetitemError: if self.ndim == 1 or self.shape[0] == 1: # no need to split columns if not is_list_like(new): # using just new[indexer] can't save us the need to cast return self.coerce_to_target_dtype(new).putmask(mask, new) else: indexer = mask.nonzero()[0] nb = self.setitem(indexer, new[indexer], using_cow=using_cow) return [nb] else: is_array = isinstance(new, np.ndarray) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): n = new if is_array: # we have a different value per-column n = new[:, i : i + 1] submask = orig_mask[:, i : i + 1] rbs = nb.putmask(submask, n, using_cow=using_cow) res_blocks.extend(rbs) return res_blocks def where( self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False ) -> list[Block]: """ evaluate the block; return result block(s) from the result Parameters ---------- other : a ndarray/object cond : np.ndarray[bool], SparseArray[bool], or BooleanArray _downcast : str or None, default "infer" Private because we only specify it when calling from fillna. Returns ------- List[Block] """ assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) transpose = self.ndim == 2 cond = extract_bool_array(cond) # EABlocks override where values = cast(np.ndarray, self.values) orig_other = other if transpose: values = values.T icond, noop = validate_putmask(values, ~cond) if noop: # GH-39595: Always return a copy; short-circuit up/downcasting if using_cow: return [self.copy(deep=False)] return [self.copy()] if other is lib.no_default: other = self.fill_value other = self._standardize_fill_value(other) try: # try/except here is equivalent to a self._can_hold_element check, # but this gets us back 'casted' which we will re-use below; # without using 'casted', expressions.where may do unwanted upcasts. casted = np_can_hold_element(values.dtype, other) except (ValueError, TypeError, LossySetitemError): # we cannot coerce, return a compat dtype if self.ndim == 1 or self.shape[0] == 1: # no need to split columns block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, using_cow=using_cow) return self._maybe_downcast( blocks, downcast=_downcast, using_cow=using_cow ) else: # since _maybe_downcast would split blocks anyway, we # can avoid some potential upcast/downcast by splitting # on the front end. 
is_array = isinstance(other, (np.ndarray, ExtensionArray)) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): oth = other if is_array: # we have a different value per-column oth = other[:, i : i + 1] submask = cond[:, i : i + 1] rbs = nb.where( oth, submask, _downcast=_downcast, using_cow=using_cow ) res_blocks.extend(rbs) return res_blocks else: other = casted alt = setitem_datetimelike_compat(values, icond.sum(), other) if alt is not other: if is_list_like(other) and len(other) < len(values): # call np.where with other to get the appropriate ValueError np.where(~icond, values, other) raise NotImplementedError( "This should not be reached; call to np.where above is " "expected to raise ValueError. Please report a bug at " "github.com/pandas-dev/pandas" ) result = values.copy() np.putmask(result, icond, alt) else: # By the time we get here, we should have all Series/Index # args extracted to ndarray if ( is_list_like(other) and not isinstance(other, np.ndarray) and len(other) == self.shape[-1] ): # If we don't do this broadcasting here, then expressions.where # will broadcast a 1D other to be row-like instead of # column-like. other = np.array(other).reshape(values.shape) # If lengths don't match (or len(other)==1), we will raise # inside expressions.where, see test_series_where # Note: expressions.where may upcast. result = expressions.where(~icond, values, other) # The np_can_hold_element check _should_ ensure that we always # have result.dtype == self.dtype here. if transpose: result = result.T return [self.make_block(result)] def fillna( self, value, limit: int | None = None, inplace: bool = False, downcast=None, using_cow: bool = False, ) -> list[Block]: """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """ # Caller is responsible for validating limit; if int it is strictly positive inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # can short-circuit the isna call noop = True else: mask = isna(self.values) mask, noop = validate_putmask(self.values, mask) if noop: # we can't process the value, but nothing to do if inplace: if using_cow: return [self.copy(deep=False)] # Arbitrarily imposing the convention that we ignore downcast # on no-op when inplace=True return [self] else: # GH#45423 consistent downcasting on no-ops. nb = self.copy(deep=not using_cow) nbs = nb._maybe_downcast([nb], downcast=downcast, using_cow=using_cow) return nbs if limit is not None: mask[mask.cumsum(self.ndim - 1) > limit] = False if inplace: nbs = self.putmask(mask.T, value, using_cow=using_cow) else: # without _downcast, we would break # test_fillna_dtype_conversion_equiv_replace nbs = self.where(value, ~mask.T, _downcast=False) # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) # makes a difference bc blk may have object dtype, which has # different behavior in _maybe_downcast. 
return extend_blocks( [ blk._maybe_downcast([blk], downcast=downcast, using_cow=using_cow) for blk in nbs ] ) def interpolate( self, *, method: FillnaOptions = "pad", axis: AxisInt = 0, index: Index | None = None, inplace: bool = False, limit: int | None = None, limit_direction: str = "forward", limit_area: str | None = None, fill_value: Any | None = None, downcast: str | None = None, using_cow: bool = False, **kwargs, ) -> list[Block]: inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # If there are no NAs, then interpolate is a no-op if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] try: m = missing.clean_fill_method(method) except ValueError: m = None if m is None and self.dtype.kind != "f": # only deal with floats # bc we already checked that can_hold_na, we don't have int dtype here # test_interp_basic checks that we make a copy here if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] if self.is_object and self.ndim == 2 and self.shape[0] != 1 and axis == 0: # split improves performance in ndarray.copy() return self.split_and_operate( type(self).interpolate, method=method, axis=axis, index=index, inplace=inplace, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, downcast=downcast, **kwargs, ) refs = None if inplace: if using_cow and self.refs.has_reference(): data = self.values.copy() else: data = self.values refs = self.refs else: data = self.values.copy() data = cast(np.ndarray, data) # bc overridden by ExtensionBlock missing.interpolate_array_2d( data, method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, **kwargs, ) nb = self.make_block_same_class(data, refs=refs) return nb._maybe_downcast([nb], downcast, using_cow) def diff(self, n: int, axis: AxisInt = 1) -> list[Block]: """return block for the diff of the values""" # only reached with ndim == 2 and axis == 1 new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] def shift( self, periods: int, axis: AxisInt = 0, fill_value: Any = None ) -> list[Block]: """shift the block by periods, possibly upcast""" # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also # Note: periods is never 0 here, as that is handled at the top of # NDFrame.shift. If that ever changes, we can do a check for periods=0 # and possibly avoid coercing. 
if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj: # with object dtype there is nothing to promote, and the user can # pass pretty much any weird fill_value they like # see test_shift_object_non_scalar_fill raise ValueError("fill_value must be a scalar") fill_value = self._standardize_fill_value(fill_value) try: # error: Argument 1 to "np_can_hold_element" has incompatible type # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" casted = np_can_hold_element( self.dtype, fill_value # type: ignore[arg-type] ) except LossySetitemError: nb = self.coerce_to_target_dtype(fill_value) return nb.shift(periods, axis=axis, fill_value=fill_value) else: values = cast(np.ndarray, self.values) new_values = shift(values, periods, axis, casted) return [self.make_block(new_values)] def quantile( self, qs: Index, # with dtype float64 interpolation: QuantileInterpolation = "linear", axis: AxisInt = 0, ) -> Block: """ compute the quantiles of the block's values. Parameters ---------- qs : Index The quantiles to be computed in float64. interpolation : str, default 'linear' Type of interpolation. axis : int, default 0 Axis to compute. Returns ------- Block """ # We should always have ndim == 2 because Series dispatches to DataFrame assert self.ndim == 2 assert axis == 1 # only ever called this way assert is_list_like(qs) # caller is responsible for this result = quantile_compat(self.values, np.asarray(qs._values), interpolation) # ensure_block_shape needed for cases where we start with EA and result # is ndarray, e.g. IntegerArray, SparseArray result = ensure_block_shape(result, ndim=2) return new_block_2d(result, placement=self._mgr_locs) def round(self, decimals: int, using_cow: bool = False) -> Block: """ Rounds the values. If the block is not of an integer or float dtype, nothing happens. This is consistent with DataFrame.round behavior. (Note: Series.round would raise) Parameters ---------- decimals: int, Number of decimal places to round to. Caller is responsible for validating this using_cow: bool, Whether Copy on Write is enabled right now """ if not self.is_numeric or self.is_bool: return self.copy(deep=not using_cow) refs = None # TODO: round only defined on BaseMaskedArray # Series also does this, so would need to fix both places # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]" # has no attribute "round" values = self.values.round(decimals) # type: ignore[union-attr] if values is self.values: refs = self.refs if not using_cow: # Normally would need to do this before, but # numpy only returns same array when round operation # is no-op # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636 values = values.copy() return self.make_block_same_class(values, refs=refs) # --------------------------------------------------------------------- # Abstract Methods Overridden By EABackedBlock and NumpyBlock def delete(self, loc) -> list[Block]: """Deletes the locs from the block. We split the block to avoid copying the underlying data. We create new blocks for every connected segment of the initial block that is not deleted. The new blocks point to the initial array.
""" if not is_list_like(loc): loc = [loc] if self.ndim == 1: values = cast(np.ndarray, self.values) values = np.delete(values, loc) mgr_locs = self._mgr_locs.delete(loc) return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] if np.max(loc) >= self.values.shape[0]: raise IndexError # Add one out-of-bounds indexer as maximum to collect # all columns after our last indexer if any loc = np.concatenate([loc, [self.values.shape[0]]]) mgr_locs_arr = self._mgr_locs.as_array new_blocks: list[Block] = [] previous_loc = -1 # TODO(CoW): This is tricky, if parent block goes out of scope # all split blocks are referencing each other even though they # don't share data refs = self.refs if self.refs.has_reference() else None for idx in loc: if idx == previous_loc + 1: # There is no column between current and last idx pass else: # No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, slice]" values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa locs = mgr_locs_arr[previous_loc + 1 : idx] nb = type(self)( values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs ) new_blocks.append(nb) previous_loc = idx return new_blocks def is_view(self) -> bool: """return a boolean if I am possibly a view""" raise AbstractMethodError(self) def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. """ raise AbstractMethodError(self) def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ raise AbstractMethodError(self) def values_for_json(self) -> np.ndarray: raise AbstractMethodError(self) def maybe_coerce_values(values: ArrayLike) -> ArrayLike: """ Input validation for values passed to __init__. Ensure that any datetime64/timedelta64 dtypes are in nanoseconds. Ensure that we do not have string dtypes. Parameters ---------- values : np.ndarray or ExtensionArray Returns ------- values : np.ndarray or ExtensionArray """ # Caller is responsible for ensuring PandasArray is already extracted. if isinstance(values, np.ndarray): values = ensure_wrapped_if_datetimelike(values) if issubclass(values.dtype.type, str): values = np.array(values, dtype=object) if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: # freq is only stored in DatetimeIndex/TimedeltaIndex, not in Series/DataFrame values = values._with_freq(None) return values def get_block_type(dtype: DtypeObj): """ Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block """ # We use kind checks because it is much more performant # than is_foo_dtype kind = dtype.kind cls: type[Block] if isinstance(dtype, SparseDtype): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock elif isinstance(dtype, DatetimeTZDtype): cls = DatetimeTZBlock elif isinstance(dtype, PeriodDtype): cls = NDArrayBackedExtensionBlock elif isinstance(dtype, ExtensionDtype): # Note: need to be sure PandasArray is unwrapped before we get here cls = ExtensionBlock elif kind in ["M", "m"]: cls = DatetimeLikeBlock elif kind in ["f", "c", "i", "u", "b"]: cls = NumericBlock else: cls = ObjectBlock return cls def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: """ ndim inference and validation. Validates that values.ndim and ndim are consistent. 
Validates that len(values) and len(placement) are consistent. Parameters ---------- values : array-like placement : BlockPlacement ndim : int Raises ------ ValueError : the number of dimensions do not match """ if values.ndim > ndim: # Check for both np.ndarray and ExtensionArray raise ValueError( "Wrong number of dimensions. " f"values.ndim > ndim [{values.ndim} > {ndim}]" ) if not is_1d_only_ea_dtype(values.dtype): # TODO(EA2D): special case not needed with 2D EAs if values.ndim != ndim: raise ValueError( "Wrong number of dimensions. " f"values.ndim != ndim [{values.ndim} != {ndim}]" ) if len(placement) != len(values): raise ValueError( f"Wrong number of items passed {len(values)}, " f"placement implies {len(placement)}" ) elif ndim == 2 and len(placement) != 1: # TODO(EA2D): special case unnecessary with 2D EAs raise ValueError("need to split") def new_block( values, placement, *, ndim: int, refs: BlockValuesRefs | None = None ) -> Block: # caller is responsible for ensuring values is NOT a PandasArray if not isinstance(placement, BlockPlacement): placement = BlockPlacement(placement) check_ndim(values, placement, ndim) klass = get_block_type(values.dtype) values = maybe_coerce_values(values) return klass(values, ndim=ndim, placement=placement, refs=refs)
null
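The record above ends with the block-construction helpers: get_block_type picks a Block subclass from a dtype, check_ndim validates shape against placement, and new_block ties them together. Below is a minimal sketch of that dtype dispatch using only public pandas/numpy dtype objects; the helper name block_kind_for and the label strings are illustrative, not the internal class names.

import numpy as np
import pandas as pd

def block_kind_for(dtype) -> str:
    # Mirrors the kind checks in get_block_type above (sketch, not the real classes).
    if isinstance(dtype, pd.SparseDtype):
        return "extension (sparse)"  # checked first so Sparse[datetime] stays sparse
    if isinstance(dtype, pd.DatetimeTZDtype):
        return "datetime-tz"
    if isinstance(dtype, pd.PeriodDtype):
        return "ndarray-backed extension"
    if isinstance(dtype, pd.api.extensions.ExtensionDtype):
        return "extension"
    if dtype.kind in "Mm":
        return "datetime-like"
    if dtype.kind in "fciub":
        return "numeric"
    return "object"

for dt in (np.dtype("int64"), np.dtype("M8[ns]"), pd.CategoricalDtype(), np.dtype(object)):
    print(dt, "->", block_kind_for(dt))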
173,087
from __future__ import annotations from functools import wraps import re from typing import ( TYPE_CHECKING, Any, Callable, Iterable, Sequence, cast, final, ) import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( internals as libinternals, lib, writers, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._libs.missing import NA from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, F, FillnaOptions, IgnoreRaise, QuantileInterpolation, Shape, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.astype import ( astype_array_safe, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, find_result_type, maybe_downcast_to_dtype, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_1d_only_ea_obj, is_dtype_equal, is_interval_dtype, is_list_like, is_sparse, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, PandasDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCPandasArray, ABCSeries, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import missing import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( extract_bool_array, putmask_inplace, putmask_without_repeat, setitem_datetimelike_compat, validate_putmask, ) from pandas.core.array_algos.quantile import quantile_compat from pandas.core.array_algos.replace import ( compare_or_regex_search, replace_regex, should_use_regex, ) from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation import expressions from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import check_setitem_lengths _dtype_obj = np.dtype("object") ArrayLike = Union["ExtensionArray", np.ndarray] ensure_platform_int = algos.ensure_platform_int def is_sparse(arr) -> bool: """ Check whether an array-like is a 1-D pandas sparse array. Check that the one-dimensional array-like is a pandas sparse array. Returns True if it is a pandas sparse array, not another type of sparse array. Parameters ---------- arr : array-like Array-like to check. Returns ------- bool Whether or not the array-like is a pandas sparse array. Examples -------- Returns `True` if the parameter is a 1-D pandas sparse array. >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0])) True >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0]))) True Returns `False` if the parameter is not sparse. >>> is_sparse(np.array([0, 0, 1, 0])) False >>> is_sparse(pd.Series([0, 1, 0, 0])) False Returns `False` if the parameter is not a pandas sparse array. >>> from scipy.sparse import bsr_matrix >>> is_sparse(bsr_matrix([0, 1, 0, 0])) False Returns `False` if the parameter has more than one dimension. """ from pandas.core.arrays.sparse import SparseDtype dtype = getattr(arr, "dtype", arr) return isinstance(dtype, SparseDtype) def isna(obj: Scalar) -> bool: ... 
def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) ) def ensure_wrapped_if_datetimelike(arr): """ Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray. 
""" if isinstance(arr, np.ndarray): if arr.dtype.kind == "M": from pandas.core.arrays import DatetimeArray return DatetimeArray._from_sequence(arr) elif arr.dtype.kind == "m": from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(arr) return arr class FloatArrayFormatter(GenericArrayFormatter): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # float_format is expected to be a string # formatter should be used to pass a function if self.float_format is not None and self.formatter is None: # GH21625, GH22270 self.fixed_width = False if callable(self.float_format): self.formatter = self.float_format self.float_format = None def _value_formatter( self, float_format: FloatFormatType | None = None, threshold: float | None = None, ) -> Callable: """Returns a function to be applied on each value to format it""" # the float_format parameter supersedes self.float_format if float_format is None: float_format = self.float_format # we are going to compose different functions, to first convert to # a string, then replace the decimal symbol, and finally chop according # to the threshold # when there is no float_format, we use str instead of '%g' # because str(0.0) = '0.0' while '%g' % 0.0 = '0' if float_format: def base_formatter(v): assert float_format is not None # for mypy # error: "str" not callable # error: Unexpected keyword argument "value" for "__call__" of # "EngFormatter" return ( float_format(value=v) # type: ignore[operator,call-arg] if notna(v) else self.na_rep ) else: def base_formatter(v): return str(v) if notna(v) else self.na_rep if self.decimal != ".": def decimal_formatter(v): return base_formatter(v).replace(".", self.decimal, 1) else: decimal_formatter = base_formatter if threshold is None: return decimal_formatter def formatter(value): if notna(value): if abs(value) > threshold: return decimal_formatter(value) else: return decimal_formatter(0.0) else: return self.na_rep return formatter def get_result_as_array(self) -> np.ndarray: """ Returns the float values converted into strings using the parameters given at initialisation, as a numpy array """ def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str): mask = isna(values) formatted = np.array( [ formatter(val) if not m else na_rep for val, m in zip(values.ravel(), mask.ravel()) ] ).reshape(values.shape) return formatted if self.formatter is not None: return format_with_na_rep(self.values, self.formatter, self.na_rep) if self.fixed_width: threshold = get_option("display.chop_threshold") else: threshold = None # if we have a fixed_width, we'll need to try different float_format def format_values_with(float_format): formatter = self._value_formatter(float_format, threshold) # default formatter leaves a space to the left when formatting # floats, must be consistent for left-justifying NaNs (GH #25061) if self.justify == "left": na_rep = " " + self.na_rep else: na_rep = self.na_rep # separate the wheat from the chaff values = self.values is_complex = is_complex_dtype(values) values = format_with_na_rep(values, formatter, na_rep) if self.fixed_width: if is_complex: result = _trim_zeros_complex(values, self.decimal) else: result = _trim_zeros_float(values, self.decimal) return np.asarray(result, dtype="object") return values # There is a special default string when we are fixed-width # The default is otherwise to use str instead of a formatting string float_format: FloatFormatType | None if self.float_format is None: if self.fixed_width: if self.leading_space 
is True: fmt_str = "{value: .{digits:d}f}" else: fmt_str = "{value:.{digits:d}f}" float_format = partial(fmt_str.format, digits=self.digits) else: float_format = self.float_format else: float_format = lambda value: self.float_format % value formatted_values = format_values_with(float_format) if not self.fixed_width: return formatted_values # we need do convert to engineering format if some values are too small # and would appear as 0, or if some values are too big and take too # much space if len(formatted_values) > 0: maxlen = max(len(x) for x in formatted_values) too_long = maxlen > self.digits + 6 else: too_long = False with np.errstate(invalid="ignore"): abs_vals = np.abs(self.values) # this is pretty arbitrary for now # large values: more that 8 characters including decimal symbol # and first digit, hence > 1e6 has_large_values = (abs_vals > 1e6).any() has_small_values = ( (abs_vals < 10 ** (-self.digits)) & (abs_vals > 0) ).any() if has_small_values or (too_long and has_large_values): if self.leading_space is True: fmt_str = "{value: .{digits:d}e}" else: fmt_str = "{value:.{digits:d}e}" float_format = partial(fmt_str.format, digits=self.digits) formatted_values = format_values_with(float_format) return formatted_values def _format_strings(self) -> list[str]: return list(self.get_result_as_array()) The provided code snippet includes necessary dependencies for implementing the `to_native_types` function. Write a Python function `def to_native_types( values: ArrayLike, *, na_rep: str = "nan", quoting=None, float_format=None, decimal: str = ".", **kwargs, ) -> np.ndarray` to solve the following problem: convert to our native types format Here is the function: def to_native_types( values: ArrayLike, *, na_rep: str = "nan", quoting=None, float_format=None, decimal: str = ".", **kwargs, ) -> np.ndarray: """convert to our native types format""" if isinstance(values, Categorical) and values.categories.dtype.kind in "Mm": # GH#40754 Convert categorical datetimes to datetime array values = algos.take_nd( values.categories._values, ensure_platform_int(values._codes), fill_value=na_rep, ) values = ensure_wrapped_if_datetimelike(values) if isinstance(values, (DatetimeArray, TimedeltaArray)): if values.ndim == 1: result = values._format_native_types(na_rep=na_rep, **kwargs) result = result.astype(object, copy=False) return result # GH#21734 Process every column separately, they might have different formats results_converted = [] for i in range(len(values)): result = values[i, :]._format_native_types(na_rep=na_rep, **kwargs) results_converted.append(result.astype(object, copy=False)) return np.vstack(results_converted) elif values.dtype.kind == "f" and not is_sparse(values): # see GH#13418: no special formatting is desired at the # output (important for appropriate 'quoting' behaviour), # so do not pass it through the FloatArrayFormatter if float_format is None and decimal == ".": mask = isna(values) if not quoting: values = values.astype(str) else: values = np.array(values, dtype="object") values[mask] = na_rep values = values.astype(object, copy=False) return values from pandas.io.formats.format import FloatArrayFormatter formatter = FloatArrayFormatter( values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) res = formatter.get_result_as_array() res = res.astype(object, copy=False) return res elif isinstance(values, ExtensionArray): mask = isna(values) new_values = np.asarray(values.astype(object)) new_values[mask] = na_rep return new_values else: 
mask = isna(values) itemsize = writers.word_len(na_rep) if values.dtype != _dtype_obj and not quoting and itemsize: values = values.astype(str) if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize: # enlarge for the na_rep values = values.astype(f"<U{itemsize}") else: values = np.array(values, dtype="object") values[mask] = na_rep values = values.astype(object, copy=False) return values
convert to our native types format
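A hedged sketch of the float fast path in to_native_types above: with no float_format and the default decimal, NaNs are replaced by na_rep and everything else is stringified via astype(str). The helper name floats_to_native is illustrative; the real function also covers datetimes, extension arrays, and the FloatArrayFormatter fallback.

import numpy as np

def floats_to_native(values: np.ndarray, na_rep: str = "nan") -> np.ndarray:
    # Sketch of the no-quoting float branch: stringify, then patch NaNs with na_rep.
    mask = np.isnan(values)
    out = values.astype(str).astype(object)
    out[mask] = na_rep
    return out

print(floats_to_native(np.array([1.5, np.nan, 3.0]), na_rep="NULL"))
# expected: ['1.5' 'NULL' '3.0']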
173,088
from __future__ import annotations from functools import wraps import re from typing import ( TYPE_CHECKING, Any, Callable, Iterable, Sequence, cast, final, ) import numpy as np from pandas._config import using_copy_on_write from pandas._libs import ( internals as libinternals, lib, writers, ) from pandas._libs.internals import ( BlockPlacement, BlockValuesRefs, ) from pandas._libs.missing import NA from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, F, FillnaOptions, IgnoreRaise, QuantileInterpolation, Shape, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.astype import ( astype_array_safe, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, find_result_type, maybe_downcast_to_dtype, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_1d_only_ea_obj, is_dtype_equal, is_interval_dtype, is_list_like, is_sparse, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, PandasDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCPandasArray, ABCSeries, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import missing import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( extract_bool_array, putmask_inplace, putmask_without_repeat, setitem_datetimelike_compat, validate_putmask, ) from pandas.core.array_algos.quantile import quantile_compat from pandas.core.array_algos.replace import ( compare_or_regex_search, replace_regex, should_use_regex, ) from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation import expressions from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import check_setitem_lengths def using_copy_on_write(): _mode_options = _global_config["mode"] return _mode_options["copy_on_write"] and _mode_options["data_manager"] == "block" ArrayLike = Union["ExtensionArray", np.ndarray] The provided code snippet includes necessary dependencies for implementing the `external_values` function. Write a Python function `def external_values(values: ArrayLike) -> ArrayLike` to solve the following problem: The array that Series.values returns (public attribute). This has some historical constraints, and is overridden in block subclasses to return the correct array (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray instead of proper extension array). Here is the function: def external_values(values: ArrayLike) -> ArrayLike: """ The array that Series.values returns (public attribute). This has some historical constraints, and is overridden in block subclasses to return the correct array (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray instead of proper extension array). 
""" if isinstance(values, (PeriodArray, IntervalArray)): return values.astype(object) elif isinstance(values, (DatetimeArray, TimedeltaArray)): # NB: for datetime64tz this is different from np.asarray(values), since # that returns an object-dtype ndarray of Timestamps. # Avoid raising in .astype in casting from dt64tz to dt64 values = values._ndarray if isinstance(values, np.ndarray) and using_copy_on_write(): values = values.view() values.flags.writeable = False # TODO(CoW) we should also mark our ExtensionArrays as read-only return values
The array that Series.values returns (public attribute). This has some historical constraints, and is overridden in block subclasses to return the correct array (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray instead of proper extension array).
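The behaviour external_values backs is visible from the public API: Series.values unwraps a few extension types into plain ndarrays, while Series.array keeps the extension array. A small illustration follows; the output comments assume a recent pandas, and under copy-on-write the returned ndarray is additionally marked read-only, as in the code above.

import pandas as pd

tz = pd.Series(pd.date_range("2024-01-01", periods=2, tz="US/Eastern"))
print(tz.values.dtype)          # datetime64[ns] -- underlying UTC ndarray, tz dropped
print(type(tz.array).__name__)  # DatetimeArray  -- tz preserved

per = pd.Series(pd.period_range("2024-01", periods=2, freq="M"))
print(per.values.dtype)          # object -- ndarray of Period objects
print(type(per.array).__name__)  # PeriodArray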
173,089
from __future__ import annotations from collections import abc from typing import ( Any, Hashable, Sequence, ) import numpy as np from numpy import ma from pandas._libs import lib from pandas._typing import ( ArrayLike, DtypeObj, Manager, npt, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core import ( algorithms, common as com, ) from pandas.core.arrays import ( BooleanArray, ExtensionArray, FloatingArray, IntegerArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, sanitize_array, ) from pandas.core.indexes.api import ( DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, union_indexes, ) from pandas.core.internals.array_manager import ( ArrayManager, SingleArrayManager, ) from pandas.core.internals.blocks import ( BlockPlacement, ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, create_block_manager_from_blocks, create_block_manager_from_column_arrays, ) def arrays_to_mgr( arrays, columns: Index, index, *, dtype: DtypeObj | None = None, verify_integrity: bool = True, typ: str | None = None, consolidate: bool = True, ) -> Manager: """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if verify_integrity: # figure out the index, if necessary if index is None: index = _extract_index(arrays) else: index = ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays, refs = _homogenize(arrays, index, dtype) # _homogenize ensures # - all(len(x) == len(index) for x in arrays) # - all(x.ndim == 1 for x in arrays) # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) # - all(type(x) is not PandasArray for x in arrays) else: index = ensure_index(index) arrays = [extract_array(x, extract_numpy=True) for x in arrays] # with _from_arrays, the passed arrays should never be Series objects refs = [None] * len(arrays) # Reached via DataFrame._from_arrays; we do minimal validation here for arr in arrays: if ( not isinstance(arr, (np.ndarray, ExtensionArray)) or arr.ndim != 1 or len(arr) != len(index) ): raise ValueError( "Arrays must be 1-dimensional np.ndarray or ExtensionArray " "with length matching len(index)" ) columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(arrays) must match len(columns)") # from BlockManager perspective axes = [columns, index] if typ == "block": return create_block_manager_from_column_arrays( arrays, axes, consolidate=consolidate, refs=refs ) elif typ == "array": return ArrayManager(arrays, [index, columns]) else: raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") def reorder_arrays( arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int ) -> tuple[list[ArrayLike], Index]: """ Pre-emptively (cheaply) reindex arrays with new columns. 
""" # reorder according to the columns if columns is not None: if not columns.equals(arr_columns): # if they are equal, there is nothing to do new_arrays: list[ArrayLike | None] new_arrays = [None] * len(columns) indexer = arr_columns.get_indexer(columns) for i, k in enumerate(indexer): if k == -1: # by convention default is all-NaN object dtype arr = np.empty(length, dtype=object) arr.fill(np.nan) else: arr = arrays[k] new_arrays[i] = arr # Incompatible types in assignment (expression has type # "List[Union[ExtensionArray, ndarray[Any, Any], None]]", variable # has type "List[Union[ExtensionArray, ndarray[Any, Any]]]") arrays = new_arrays # type: ignore[assignment] arr_columns = columns return arrays, arr_columns def to_arrays( data, columns: Index | None, dtype: DtypeObj | None = None ) -> tuple[list[ArrayLike], Index]: """ Return list of arrays, columns. Returns ------- list[ArrayLike] These will become columns in a DataFrame. Index This will become frame.columns. Notes ----- Ensures that len(result_arrays) == len(result_index). """ if isinstance(data, ABCDataFrame): # see test_from_records_with_index_data, test_from_records_bad_index_column if columns is not None: arrays = [ data._ixs(i, axis=1)._values for i, col in enumerate(data.columns) if col in columns ] else: columns = data.columns arrays = [data._ixs(i, axis=1)._values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): if data.dtype.names is not None: # i.e. numpy structured array columns = ensure_index(data.dtype.names) arrays = [data[name] for name in columns] if len(data) == 0: # GH#42456 the indexing above results in list of 2D ndarrays # TODO: is that an issue with numpy? for i, arr in enumerate(arrays): if arr.ndim == 2: arrays[i] = arr[:, 0] return arrays, columns return [], ensure_index([]) elif isinstance(data, np.ndarray) and data.dtype.names is not None: # e.g. recarray columns = Index(list(data.dtype.names)) arrays = [data[k] for k in columns] return arrays, columns if isinstance(data[0], (list, tuple)): arr = _list_to_arrays(data) elif isinstance(data[0], abc.Mapping): arr, columns = _list_of_dict_to_arrays(data, columns) elif isinstance(data[0], ABCSeries): arr, columns = _list_of_series_to_arrays(data, columns) else: # last ditch effort data = [tuple(x) for x in data] arr = _list_to_arrays(data) content, columns = _finalize_columns_and_data(arr, columns, dtype) return content, columns DtypeObj = Union[np.dtype, "ExtensionDtype"] Manager = Union[ "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager" ] def default_index(n: int) -> RangeIndex: rng = range(0, n) return RangeIndex._simple_new(rng, name=None) The provided code snippet includes necessary dependencies for implementing the `rec_array_to_mgr` function. Write a Python function `def rec_array_to_mgr( data: np.recarray | np.ndarray, index, columns, dtype: DtypeObj | None, copy: bool, typ: str, ) -> Manager` to solve the following problem: Extract from a masked rec array and create the manager. Here is the function: def rec_array_to_mgr( data: np.recarray | np.ndarray, index, columns, dtype: DtypeObj | None, copy: bool, typ: str, ) -> Manager: """ Extract from a masked rec array and create the manager. 
""" # essentially process a record array then fill it fdata = ma.getdata(data) if index is None: index = default_index(len(fdata)) else: index = ensure_index(index) if columns is not None: columns = ensure_index(columns) arrays, arr_columns = to_arrays(fdata, columns) # create the manager arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index)) if columns is None: columns = arr_columns mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ) if copy: mgr = mgr.copy() return mgr
Extract from a masked rec array and create the manager.
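One public path that reaches rec_array_to_mgr is constructing a DataFrame from a numpy record/structured array; passing a columns subset exercises the reorder_arrays step quoted above. A small, hedged illustration (the exact dispatch through the constructor may vary by pandas version):

import numpy as np
import pandas as pd

rec = np.array(
    [(1, 2.5, "a"), (2, 3.5, "b")],
    dtype=[("x", "i8"), ("y", "f8"), ("z", "U1")],
).view(np.recarray)

print(pd.DataFrame(rec).dtypes)               # x: int64, y: float64, z: object
print(pd.DataFrame(rec, columns=["y", "x"]))  # fields selected and reordered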
173,090
from __future__ import annotations from collections import abc from typing import ( Any, Hashable, Sequence, ) import numpy as np from numpy import ma from pandas._libs import lib from pandas._typing import ( ArrayLike, DtypeObj, Manager, npt, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core import ( algorithms, common as com, ) from pandas.core.arrays import ( BooleanArray, ExtensionArray, FloatingArray, IntegerArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, sanitize_array, ) from pandas.core.indexes.api import ( DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, union_indexes, ) from pandas.core.internals.array_manager import ( ArrayManager, SingleArrayManager, ) from pandas.core.internals.blocks import ( BlockPlacement, ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, create_block_manager_from_blocks, create_block_manager_from_column_arrays, ) def arrays_to_mgr( arrays, columns: Index, index, *, dtype: DtypeObj | None = None, verify_integrity: bool = True, typ: str | None = None, consolidate: bool = True, ) -> Manager: """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. 
""" if verify_integrity: # figure out the index, if necessary if index is None: index = _extract_index(arrays) else: index = ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays, refs = _homogenize(arrays, index, dtype) # _homogenize ensures # - all(len(x) == len(index) for x in arrays) # - all(x.ndim == 1 for x in arrays) # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) # - all(type(x) is not PandasArray for x in arrays) else: index = ensure_index(index) arrays = [extract_array(x, extract_numpy=True) for x in arrays] # with _from_arrays, the passed arrays should never be Series objects refs = [None] * len(arrays) # Reached via DataFrame._from_arrays; we do minimal validation here for arr in arrays: if ( not isinstance(arr, (np.ndarray, ExtensionArray)) or arr.ndim != 1 or len(arr) != len(index) ): raise ValueError( "Arrays must be 1-dimensional np.ndarray or ExtensionArray " "with length matching len(index)" ) columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(arrays) must match len(columns)") # from BlockManager perspective axes = [columns, index] if typ == "block": return create_block_manager_from_column_arrays( arrays, axes, consolidate=consolidate, refs=refs ) elif typ == "array": return ArrayManager(arrays, [index, columns]) else: raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") Manager = Union[ "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager" ] class ArrayManager(BaseArrayManager): def ndim(self) -> Literal[2]: return 2 def __init__( self, arrays: list[np.ndarray | ExtensionArray], axes: list[Index], verify_integrity: bool = True, ) -> None: # Note: we are storing the axes in "_axes" in the (row, columns) order # which contrasts the order how it is stored in BlockManager self._axes = axes self.arrays = arrays if verify_integrity: self._axes = [ensure_index(ax) for ax in axes] arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays] self.arrays = [maybe_coerce_values(arr) for arr in arrays] self._verify_integrity() def _verify_integrity(self) -> None: n_rows, n_columns = self.shape_proper if not len(self.arrays) == n_columns: raise ValueError( "Number of passed arrays must equal the size of the column Index: " f"{len(self.arrays)} arrays vs {n_columns} columns." ) for arr in self.arrays: if not len(arr) == n_rows: raise ValueError( "Passed arrays should have the same length as the rows Index: " f"{len(arr)} vs {n_rows} rows" ) if not isinstance(arr, (np.ndarray, ExtensionArray)): raise ValueError( "Passed arrays should be np.ndarray or ExtensionArray instances, " f"got {type(arr)} instead" ) if not arr.ndim == 1: raise ValueError( "Passed arrays should be 1-dimensional, got array with " f"{arr.ndim} dimensions instead." ) # -------------------------------------------------------------------- # Indexing def fast_xs(self, loc: int) -> SingleArrayManager: """ Return the array corresponding to `frame.iloc[loc]`. 
Parameters ---------- loc : int Returns ------- np.ndarray or ExtensionArray """ dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) values = [arr[loc] for arr in self.arrays] if isinstance(dtype, ExtensionDtype): result = dtype.construct_array_type()._from_sequence(values, dtype=dtype) # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT elif is_datetime64_ns_dtype(dtype): result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray elif is_timedelta64_ns_dtype(dtype): result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray else: result = np.array(values, dtype=dtype) return SingleArrayManager([result], [self._axes[1]]) def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager: axis = self._normalize_axis(axis) if axis == 0: arrays = [arr[slobj] for arr in self.arrays] elif axis == 1: arrays = self.arrays[slobj] new_axes = list(self._axes) new_axes[axis] = new_axes[axis]._getitem_slice(slobj) return type(self)(arrays, new_axes, verify_integrity=False) def iget(self, i: int) -> SingleArrayManager: """ Return the data as a SingleArrayManager. """ values = self.arrays[i] return SingleArrayManager([values], [self._axes[0]]) def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). """ return self.arrays[i] def column_arrays(self) -> list[ArrayLike]: """ Used in the JSON C code to access column arrays. """ return [np.asarray(arr) for arr in self.arrays] def iset( self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False ) -> None: """ Set new column(s). This changes the ArrayManager in-place, but replaces (an) existing column(s), not changing column values in-place). Parameters ---------- loc : integer, slice or boolean mask Positional location (already bounds checked) value : np.ndarray or ExtensionArray inplace : bool, default False Whether overwrite existing array as opposed to replacing it. """ # single column -> single integer index if lib.is_integer(loc): # TODO can we avoid needing to unpack this here? 
That means converting # DataFrame into 1D array when loc is an integer if isinstance(value, np.ndarray) and value.ndim == 2: assert value.shape[1] == 1 value = value[:, 0] # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item # but we should avoid that and pass directly the proper array value = maybe_coerce_values(value) assert isinstance(value, (np.ndarray, ExtensionArray)) assert value.ndim == 1 assert len(value) == len(self._axes[0]) self.arrays[loc] = value return # multiple columns -> convert slice or array to integer indices elif isinstance(loc, slice): indices = range( loc.start if loc.start is not None else 0, loc.stop if loc.stop is not None else self.shape_proper[1], loc.step if loc.step is not None else 1, ) else: assert isinstance(loc, np.ndarray) assert loc.dtype == "bool" # error: Incompatible types in assignment (expression has type "ndarray", # variable has type "range") indices = np.nonzero(loc)[0] # type: ignore[assignment] assert value.ndim == 2 assert value.shape[0] == len(self._axes[0]) for value_idx, mgr_idx in enumerate(indices): # error: No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, int]" value_arr = value[:, value_idx] # type: ignore[call-overload] self.arrays[mgr_idx] = value_arr return def column_setitem( self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False ) -> None: """ Set values ("setitem") into a single column (not setting the full column). This is a method on the ArrayManager level, to avoid creating an intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) """ if not is_integer(loc): raise TypeError("The column index should be an integer") arr = self.arrays[loc] mgr = SingleArrayManager([arr], [self._axes[0]]) if inplace_only: mgr.setitem_inplace(idx, value) else: new_mgr = mgr.setitem((idx,), value) # update existing ArrayManager in-place self.arrays[loc] = new_mgr.arrays[0] def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray """ # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) value = extract_array(value, extract_numpy=True) if value.ndim == 2: if value.shape[0] == 1: # error: No overload variant of "__getitem__" of "ExtensionArray" # matches argument type "Tuple[int, slice]" value = value[0, :] # type: ignore[call-overload] else: raise ValueError( f"Expected a 1D array, got an array with shape {value.shape}" ) value = maybe_coerce_values(value) # TODO self.arrays can be empty # assert len(value) == len(self.arrays[0]) # TODO is this copy needed? arrays = self.arrays.copy() arrays.insert(loc, value) self.arrays = arrays self._axes[1] = new_axis def idelete(self, indexer) -> ArrayManager: """ Delete selected locations in-place (new block and array, same BlockManager) """ to_keep = np.ones(self.shape[0], dtype=np.bool_) to_keep[indexer] = False self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] self._axes = [self._axes[0], self._axes[1][to_keep]] return self # -------------------------------------------------------------------- # Array-wise Operation def grouped_reduce(self: T, func: Callable) -> T: """ Apply grouped reduction function columnwise, returning a new ArrayManager. 
Parameters ---------- func : grouped reduction function Returns ------- ArrayManager """ result_arrays: list[np.ndarray] = [] result_indices: list[int] = [] for i, arr in enumerate(self.arrays): # grouped_reduce functions all expect 2D arrays arr = ensure_block_shape(arr, ndim=2) res = func(arr) if res.ndim == 2: # reverse of ensure_block_shape assert res.shape[0] == 1 res = res[0] result_arrays.append(res) result_indices.append(i) if len(result_arrays) == 0: nrows = 0 else: nrows = result_arrays[0].shape[0] index = Index(range(nrows)) columns = self.items # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] def reduce(self: T, func: Callable) -> T: """ Apply reduction function column-wise, returning a single-row ArrayManager. Parameters ---------- func : reduction function Returns ------- ArrayManager """ result_arrays: list[np.ndarray] = [] for i, arr in enumerate(self.arrays): res = func(arr, axis=0) # TODO NaT doesn't preserve dtype, so we need to ensure to create # a timedelta result array if original was timedelta # what if datetime results in timedelta? (eg std) dtype = arr.dtype if res is NaT else None result_arrays.append( sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type] ) index = Index._simple_new(np.array([None], dtype=object)) # placeholder columns = self.items # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] return new_mgr def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ # TODO what if `other` is BlockManager ? left_arrays = self.arrays right_arrays = other.arrays result_arrays = [ array_op(left, right) for left, right in zip(left_arrays, right_arrays) ] return type(self)(result_arrays, self._axes) def quantile( self, *, qs: Index, # with dtype float64 axis: AxisInt = 0, transposed: bool = False, interpolation: QuantileInterpolation = "linear", ) -> ArrayManager: arrs = [ensure_block_shape(x, 2) for x in self.arrays] assert axis == 1 new_arrs = [ quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs ] for i, arr in enumerate(new_arrs): if arr.ndim == 2: assert arr.shape[0] == 1, arr.shape new_arrs[i] = arr[0] axes = [qs, self._axes[1]] return type(self)(new_arrs, axes) # ---------------------------------------------------------------- def unstack(self, unstacker, fill_value) -> ArrayManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. 
Returns ------- unstacked : BlockManager """ indexer, _ = unstacker._indexer_and_to_sort if unstacker.mask.all(): new_indexer = indexer allow_fill = False new_mask2D = None needs_masking = None else: new_indexer = np.full(unstacker.mask.shape, -1) new_indexer[unstacker.mask] = indexer allow_fill = True # calculating the full mask once and passing it to take_1d is faster # than letting take_1d calculate it in each repeated call new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) new_indexer2D = new_indexer.reshape(*unstacker.full_shape) new_indexer2D = ensure_platform_int(new_indexer2D) new_arrays = [] for arr in self.arrays: for i in range(unstacker.full_shape[1]): if allow_fill: # error: Value of type "Optional[Any]" is not indexable [index] new_arr = take_1d( arr, new_indexer2D[:, i], allow_fill=needs_masking[i], # type: ignore[index] fill_value=fill_value, mask=new_mask2D[:, i], # type: ignore[index] ) else: new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False) new_arrays.append(new_arr) new_index = unstacker.new_index new_columns = unstacker.get_new_columns(self._axes[1]) new_axes = [new_index, new_columns] return type(self)(new_arrays, new_axes, verify_integrity=False) def as_array( self, dtype=None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the blockmanager data into an numpy array. Parameters ---------- dtype : object, default None Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of False does not guarantee that the underlying data is not copied. na_value : object, default lib.no_default Value to be used as the missing value sentinel. Returns ------- arr : ndarray """ if len(self.arrays) == 0: empty_arr = np.empty(self.shape, dtype=float) return empty_arr.transpose() # We want to copy when na_value is provided to avoid # mutating the original object copy = copy or na_value is not lib.no_default if not dtype: dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) if isinstance(dtype, SparseDtype): dtype = dtype.subtype elif isinstance(dtype, PandasDtype): dtype = dtype.numpy_dtype elif is_extension_array_dtype(dtype): dtype = "object" elif is_dtype_equal(dtype, str): dtype = "object" result = np.empty(self.shape_proper, dtype=dtype) for i, arr in enumerate(self.arrays): arr = arr.astype(dtype, copy=copy) result[:, i] = arr if na_value is not lib.no_default: result[isna(result)] = na_value return result class SingleArrayManager(BaseArrayManager, SingleDataManager): __slots__ = [ "_axes", # private attribute, because 'axes' has different order, see below "arrays", ] arrays: list[np.ndarray | ExtensionArray] _axes: list[Index] def ndim(self) -> Literal[1]: return 1 def __init__( self, arrays: list[np.ndarray | ExtensionArray], axes: list[Index], verify_integrity: bool = True, ) -> None: self._axes = axes self.arrays = arrays if verify_integrity: assert len(axes) == 1 assert len(arrays) == 1 self._axes = [ensure_index(ax) for ax in self._axes] arr = arrays[0] arr = maybe_coerce_values(arr) arr = extract_pandas_array(arr, None, 1)[0] self.arrays = [arr] self._verify_integrity() def _verify_integrity(self) -> None: (n_rows,) = self.shape assert len(self.arrays) == 1 arr = self.arrays[0] assert len(arr) == n_rows if not arr.ndim == 1: raise ValueError( "Passed array should be 1-dimensional, got array with " f"{arr.ndim} dimensions instead." 
) def _normalize_axis(axis): return axis def make_empty(self, axes=None) -> SingleArrayManager: """Return an empty ArrayManager with index/array of length 0""" if axes is None: axes = [Index([], dtype=object)] array: np.ndarray = np.array([], dtype=self.dtype) return type(self)([array], axes) def from_array(cls, array, index) -> SingleArrayManager: return cls([array], [index]) def axes(self): return self._axes def index(self) -> Index: return self._axes[0] def dtype(self): return self.array.dtype def external_values(self): """The array that Series.values returns""" return external_values(self.array) def internal_values(self): """The array that Series._values returns""" return self.array def array_values(self): """The array that Series.array returns""" arr = self.array if isinstance(arr, np.ndarray): arr = PandasArray(arr) return arr def _can_hold_na(self) -> bool: if isinstance(self.array, np.ndarray): return self.array.dtype.kind not in ["b", "i", "u"] else: # ExtensionArray return self.array._can_hold_na def is_single_block(self) -> bool: return True def fast_xs(self, loc: int) -> SingleArrayManager: raise NotImplementedError("Use series._values[loc] instead") def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleArrayManager: if axis >= self.ndim: raise IndexError("Requested axis not found in manager") new_array = self.array[slobj] new_index = self.index._getitem_slice(slobj) return type(self)([new_array], [new_index], verify_integrity=False) def getitem_mgr(self, indexer) -> SingleArrayManager: new_array = self.array[indexer] new_index = self.index[indexer] return type(self)([new_array], [new_index]) def apply(self, func, **kwargs): if callable(func): new_array = func(self.array, **kwargs) else: new_array = getattr(self.array, func)(**kwargs) return type(self)([new_array], self._axes) def setitem(self, indexer, value) -> SingleArrayManager: """ Set values with indexer. For SingleArrayManager, this backs s[indexer] = value See `setitem_inplace` for a version that works inplace and doesn't return a new Manager. """ if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: raise ValueError(f"Cannot set values with ndim > {self.ndim}") return self.apply_with_block("setitem", indexer=indexer, value=value) def idelete(self, indexer) -> SingleArrayManager: """ Delete selected locations in-place (new array, same ArrayManager) """ to_keep = np.ones(self.shape[0], dtype=np.bool_) to_keep[indexer] = False self.arrays = [self.arrays[0][to_keep]] self._axes = [self._axes[0][to_keep]] return self def _get_data_subset(self, predicate: Callable) -> SingleArrayManager: # used in get_numeric_data / get_bool_data if predicate(self.array): return type(self)(self.arrays, self._axes, verify_integrity=False) else: return self.make_empty() def set_values(self, values: ArrayLike) -> None: """ Set (replace) the values of the SingleArrayManager in place. Use at your own risk! This does not check if the passed values are valid for the current SingleArrayManager (length, dtype, etc). """ self.arrays[0] = values def to_2d_mgr(self, columns: Index) -> ArrayManager: """ Manager analogue of Series.to_frame """ arrays = [self.arrays[0]] axes = [self.axes[0], columns] return ArrayManager(arrays, axes, verify_integrity=False) class BlockManager(libinternals.BlockManager, BaseBlockManager): """ BaseBlockManager that holds 2D blocks. 
""" ndim = 2 # ---------------------------------------------------------------- # Constructors def __init__( self, blocks: Sequence[Block], axes: Sequence[Index], verify_integrity: bool = True, ) -> None: if verify_integrity: # Assertion disabled for performance # assert all(isinstance(x, Index) for x in axes) for block in blocks: if self.ndim != block.ndim: raise AssertionError( f"Number of Block dimensions ({block.ndim}) must equal " f"number of axes ({self.ndim})" ) # As of 2.0, the caller is responsible for ensuring that # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2; # previously there was a special check for fastparquet compat. self._verify_integrity() def _verify_integrity(self) -> None: mgr_shape = self.shape tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: if block.shape[1:] != mgr_shape[1:]: raise_construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError( "Number of manager items must equal union of " f"block items\n# manager items: {len(self.items)}, # " f"tot_items: {tot_items}" ) def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager: """ Constructor for BlockManager and SingleBlockManager with same signature. """ return cls(blocks, axes, verify_integrity=False) # ---------------------------------------------------------------- # Indexing def fast_xs(self, loc: int) -> SingleBlockManager: """ Return the array corresponding to `frame.iloc[loc]`. Parameters ---------- loc : int Returns ------- np.ndarray or ExtensionArray """ if len(self.blocks) == 1: # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like; # is this ruled out in the general case? result = self.blocks[0].iget((slice(None), loc)) # in the case of a single block, the new block is a view block = new_block( result, placement=slice(0, len(result)), ndim=1, refs=self.blocks[0].refs, ) return SingleBlockManager(block, self.axes[0]) dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) n = len(self) # GH#46406 immutable_ea = isinstance(dtype, SparseDtype) if isinstance(dtype, ExtensionDtype) and not immutable_ea: cls = dtype.construct_array_type() result = cls._empty((n,), dtype=dtype) else: # error: Argument "dtype" to "empty" has incompatible type # "Union[Type[object], dtype[Any], ExtensionDtype, None]"; expected # "None" result = np.empty( n, dtype=object if immutable_ea else dtype # type: ignore[arg-type] ) result = ensure_wrapped_if_datetimelike(result) for blk in self.blocks: # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): result[rl] = blk.iget((i, loc)) if immutable_ea: dtype = cast(ExtensionDtype, dtype) result = dtype.construct_array_type()._from_sequence(result, dtype=dtype) block = new_block(result, placement=slice(0, len(result)), ndim=1) return SingleBlockManager(block, self.axes[0]) def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager: """ Return the data as a SingleBlockManager. """ block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) # shortcut for select a single-dim from a 2-dim BM bp = BlockPlacement(slice(0, len(values))) nb = type(block)( values, placement=bp, ndim=1, refs=block.refs if track_ref else None ) return SingleBlockManager(nb, self.axes[1]) def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). Warning! 
The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution. """ # TODO(CoW) making the arrays read-only might make this safer to use? block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) return values def column_arrays(self) -> list[np.ndarray]: """ Used in the JSON C code to access column arrays. This optimizes compared to using `iget_values` by converting each block's values only once up front. Warning! This doesn't handle Copy-on-Write, so should be used with caution (current use case of consuming this in the JSON code is fine). """ # This is an optimized equivalent to # result = [self.iget_values(i) for i in range(len(self.items))] result: list[np.ndarray | None] = [None] * len(self.items) for blk in self.blocks: mgr_locs = blk._mgr_locs values = blk.values_for_json() if values.ndim == 1: # TODO(EA2D): special casing not needed with 2D EAs result[mgr_locs[0]] = values else: for i, loc in enumerate(mgr_locs): result[loc] = values[i] # error: Incompatible return value type (got "List[None]", # expected "List[ndarray[Any, Any]]") return result # type: ignore[return-value] def iset( self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False ): """ Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical if self._blklocs is None and self.ndim > 1: self._rebuild_blknos_and_blklocs() # Note: we exclude DTA/TDA here value_is_extension_type = is_1d_only_ea_dtype(value.dtype) if not value_is_extension_type: if value.ndim == 2: value = value.T else: value = ensure_block_shape(value, ndim=2) if value.shape[1:] != self.shape[1:]: raise AssertionError( "Shape of new values must be compatible with manager shape" ) if lib.is_integer(loc): # We have 6 tests where loc is _not_ an int. # In this case, get_blkno_placements will yield only one tuple, # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1))) # Check if we can use _iset_single fastpath loc = cast(int, loc) blkno = self.blknos[loc] blk = self.blocks[blkno] if len(blk._mgr_locs) == 1: # TODO: fastest way to check this? return self._iset_single( loc, value, inplace=inplace, blkno=blkno, blk=blk, ) # error: Incompatible types in assignment (expression has type # "List[Union[int, slice, ndarray]]", variable has type "Union[int, # slice, ndarray]") loc = [loc] # type: ignore[assignment] # categorical/sparse/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] # Accessing public blknos ensures the public versions are initialized blknos = self.blknos[loc] blklocs = self.blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True): blk = self.blocks[blkno_l] blk_locs = blklocs[val_locs.indexer] if inplace and blk.should_store(value): # Updating inplace -> check if we need to do Copy-on-Write if using_copy_on_write() and not self._has_no_reference_block(blkno_l): self._iset_split_block(blkno_l, blk_locs, value_getitem(val_locs)) else: blk.set_inplace(blk_locs, value_getitem(val_locs)) continue else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal.
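# (the actual removal and the blkno renumbering happen in a single pass
#  after this loop, once all unfit locations are known)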
if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno_l) continue else: # Defer setting the new values to enable consolidation self._iset_split_block(blkno_l, blk_locs) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.intp) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = new_blknos[self._blknos] self.blocks = tuple( blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos) ) if unfit_val_locs: unfit_idxr = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_idxr) new_blocks: list[Block] = [] # TODO(CoW) is this always correct to assume that the new_blocks # are not referencing anything else? if value_is_extension_type: # This code (ab-)uses the fact that EA blocks contain only # one item. # TODO(EA2D): special casing unnecessary with 2D EAs new_blocks.extend( new_block_2d( values=value, placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)), ) for mgr_loc in unfit_idxr ) self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks) self._blklocs[unfit_idxr] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( new_block_2d( values=value_getitem(unfit_val_items), placement=BlockPlacement(unfit_idxr), ) ) self._blknos[unfit_idxr] = len(self.blocks) self._blklocs[unfit_idxr] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False def _iset_split_block( self, blkno_l: int, blk_locs: np.ndarray | list[int], value: ArrayLike | None = None, ) -> None: """Removes columns from a block by splitting the block. Avoids copying the whole block through slicing and updates the manager after determining the new block structure. Optionally adds a new block, otherwise has to be done by the caller. Parameters ---------- blkno_l: The block number to operate on, relevant for updating the manager blk_locs: The locations of our block that should be deleted. value: The value to set as a replacement. """ blk = self.blocks[blkno_l] if self._blklocs is None: self._rebuild_blknos_and_blklocs() nbs_tup = tuple(blk.delete(blk_locs)) if value is not None: locs = blk.mgr_locs.as_array[blk_locs] first_nb = new_block_2d(value, BlockPlacement(locs)) else: first_nb = nbs_tup[0] nbs_tup = tuple(nbs_tup[1:]) nr_blocks = len(self.blocks) blocks_tup = ( self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup ) self.blocks = blocks_tup if not nbs_tup and value is not None: # No need to update anything if split did not happen return self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb)) for i, nb in enumerate(nbs_tup): self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb)) self._blknos[nb.mgr_locs.indexer] = i + nr_blocks def _iset_single( self, loc: int, value: ArrayLike, inplace: bool, blkno: int, blk: Block ) -> None: """ Fastpath for iset when we are only setting a single position and the Block currently in that position is itself single-column. In this case we can swap out the entire Block and blklocs and blknos are unaffected.
""" # Caller is responsible for verifying value.shape if inplace and blk.should_store(value): copy = False if using_copy_on_write() and not self._has_no_reference_block(blkno): # perform Copy-on-Write and clear the reference copy = True iloc = self.blklocs[loc] blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy) return nb = new_block_2d(value, placement=blk._mgr_locs) old_blocks = self.blocks new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :] self.blocks = new_blocks return def column_setitem( self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False ) -> None: """ Set values ("setitem") into a single column (not setting the full column). This is a method on the BlockManager level, to avoid creating an intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) """ if using_copy_on_write() and not self._has_no_reference(loc): blkno = self.blknos[loc] # Split blocks to only copy the column we want to modify blk_loc = self.blklocs[loc] # Copy our values values = self.blocks[blkno].values if values.ndim == 1: values = values.copy() else: # Use [blk_loc] as indexer to keep ndim=2, this already results in a # copy values = values[[blk_loc]] self._iset_split_block(blkno, [blk_loc], values) # this manager is only created temporarily to mutate the values in place # so don't track references, otherwise the `setitem` would perform CoW again col_mgr = self.iget(loc, track_ref=False) if inplace_only: col_mgr.setitem_inplace(idx, value) else: new_mgr = col_mgr.setitem((idx,), value) self.iset(loc, new_mgr._block.values, inplace=True) def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray """ # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) if value.ndim == 2: value = value.T if len(value) > 1: raise ValueError( f"Expected a 1D array, got an array with shape {value.T.shape}" ) else: value = ensure_block_shape(value, ndim=self.ndim) bp = BlockPlacement(slice(loc, loc + 1)) # TODO(CoW) do we always "own" the passed `value`? block = new_block_2d(values=value, placement=bp) if not len(self.blocks): # Fastpath self._blklocs = np.array([0], dtype=np.intp) self._blknos = np.array([0], dtype=np.intp) else: self._insert_update_mgr_locs(loc) self._insert_update_blklocs_and_blknos(loc) self.axes[0] = new_axis self.blocks += (block,) self._known_consolidated = False if sum(not block.is_extension for block in self.blocks) > 100: warnings.warn( "DataFrame is highly fragmented. This is usually the result " "of calling `frame.insert` many times, which has poor performance. " "Consider joining all columns at once using pd.concat(axis=1) " "instead. To get a de-fragmented frame, use `newframe = frame.copy()`", PerformanceWarning, stacklevel=find_stack_level(), ) def _insert_update_mgr_locs(self, loc) -> None: """ When inserting a new Block at location 'loc', we increment all of the mgr_locs of blocks above that by one. """ for blkno, count in _fast_count_smallints(self.blknos[loc:]): # .620 this way, .326 of which is in increment_above blk = self.blocks[blkno] blk._mgr_locs = blk._mgr_locs.increment_above(loc) def _insert_update_blklocs_and_blknos(self, loc) -> None: """ When inserting a new Block at location 'loc', we update our _blklocs and _blknos. 
""" # Accessing public blklocs ensures the public versions are initialized if loc == self.blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs[::-1], 0)[::-1] self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1] else: new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( self.blklocs, self.blknos, loc, len(self.blocks) ) self._blklocs = new_blklocs self._blknos = new_blknos def idelete(self, indexer) -> BlockManager: """ Delete selected locations, returning a new BlockManager. """ is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True taker = (~is_deleted).nonzero()[0] nbs = self._slice_take_blocks_ax0(taker, only_slice=True) new_columns = self.items[~is_deleted] axes = [new_columns, self.axes[1]] return type(self)(tuple(nbs), axes, verify_integrity=False) # ---------------------------------------------------------------- # Block-wise Operation def grouped_reduce(self: T, func: Callable) -> T: """ Apply grouped reduction function blockwise, returning a new BlockManager. Parameters ---------- func : grouped reduction function Returns ------- BlockManager """ result_blocks: list[Block] = [] for blk in self.blocks: if blk.is_object: # split on object-dtype blocks bc some columns may raise # while others do not. for sb in blk._split(): applied = sb.apply(func) result_blocks = extend_blocks(applied, result_blocks) else: applied = blk.apply(func) result_blocks = extend_blocks(applied, result_blocks) if len(result_blocks) == 0: nrows = 0 else: nrows = result_blocks[0].values.shape[-1] index = Index(range(nrows)) return type(self).from_blocks(result_blocks, [self.axes[0], index]) def reduce(self: T, func: Callable) -> T: """ Apply reduction function blockwise, returning a single-row BlockManager. Parameters ---------- func : reduction function Returns ------- BlockManager """ # If 2D, we assume that we're operating column-wise assert self.ndim == 2 res_blocks: list[Block] = [] for blk in self.blocks: nbs = blk.reduce(func) res_blocks.extend(nbs) index = Index([None]) # placeholder new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) return new_mgr def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ return operate_blockwise(self, other, array_op) def _equal_values(self: BlockManager, other: BlockManager) -> bool: """ Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked. """ return blockwise_all(self, other, array_equals) def quantile( self: T, *, qs: Index, # with dtype float 64 axis: AxisInt = 0, interpolation: QuantileInterpolation = "linear", ) -> T: """ Iterate over blocks applying quantile reduction. This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- axis: reduction axis, default 0 consolidate: bool, default True. 
Join together blocks having same dtype interpolation : type of interpolation, default 'linear' qs : list of the quantiles to be computed Returns ------- BlockManager """ # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 assert is_list_like(qs) # caller is responsible for this assert axis == 1 # only ever called this way new_axes = list(self.axes) new_axes[1] = Index(qs, dtype=np.float64) blocks = [ blk.quantile(axis=axis, qs=qs, interpolation=interpolation) for blk in self.blocks ] return type(self)(blocks, new_axes) # ---------------------------------------------------------------- def unstack(self, unstacker, fill_value) -> BlockManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager """ new_columns = unstacker.get_new_columns(self.items) new_index = unstacker.new_index allow_fill = not unstacker.mask_all if allow_fill: # calculating the full mask once and passing it to Block._unstack is # faster than letting calculating it in each repeated call new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) else: needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool) new_blocks: list[Block] = [] columns_mask: list[np.ndarray] = [] if len(self.items) == 0: factor = 1 else: fac = len(new_columns) / len(self.items) assert fac == int(fac) factor = int(fac) for blk in self.blocks: mgr_locs = blk.mgr_locs new_placement = mgr_locs.tile_for_unstack(factor) blocks, mask = blk._unstack( unstacker, fill_value, new_placement=new_placement, needs_masking=needs_masking, ) new_blocks.extend(blocks) columns_mask.extend(mask) # Block._unstack should ensure this holds, assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks) # In turn this ensures that in the BlockManager call below # we have len(new_columns) == sum(x.shape[0] for x in new_blocks) # which suffices to allow us to pass verify_inegrity=False new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) return bm def to_dict(self, copy: bool = True): """ Return a dict of str(dtype) -> BlockManager Parameters ---------- copy : bool, default True Returns ------- values : a dict of dtype -> BlockManager """ bd: dict[str, list[Block]] = {} for b in self.blocks: bd.setdefault(str(b.dtype), []).append(b) # TODO(EA2D): the combine will be unnecessary with 2D EAs return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()} def as_array( self, dtype: np.dtype | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the blockmanager data into an numpy array. Parameters ---------- dtype : np.dtype or None, default None Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of False does not guarantee that the underlying data is not copied. na_value : object, default lib.no_default Value to be used as the missing value sentinel. 
Returns ------- arr : ndarray """ # TODO(CoW) handle case where resulting array is a view if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) return arr.transpose() # We want to copy when na_value is provided to avoid # mutating the original object copy = copy or na_value is not lib.no_default if self.is_single_block: blk = self.blocks[0] if blk.is_extension: # Avoid implicit conversion of extension blocks to object # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no # attribute "to_numpy" arr = blk.values.to_numpy( # type: ignore[union-attr] dtype=dtype, na_value=na_value, ).reshape(blk.shape) else: arr = np.asarray(blk.get_values()) if dtype: arr = arr.astype(dtype, copy=False) if copy: arr = arr.copy() elif using_copy_on_write(): arr = arr.view() arr.flags.writeable = False else: arr = self._interleave(dtype=dtype, na_value=na_value) # The underlying data was copied within _interleave, so no need # to further copy if copy=True or setting na_value if na_value is not lib.no_default: arr[isna(arr)] = na_value return arr.transpose() def _interleave( self, dtype: np.dtype | None = None, na_value: object = lib.no_default, ) -> np.ndarray: """ Return ndarray from blocks with specified item order Items must be contained in the blocks """ if not dtype: # Incompatible types in assignment (expression has type # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has # type "Optional[dtype[Any]]") dtype = interleaved_dtype( # type: ignore[assignment] [blk.dtype for blk in self.blocks] ) # TODO: https://github.com/pandas-dev/pandas/issues/22791 # Give EAs some input on what happens here. Sparse needs this. if isinstance(dtype, SparseDtype): dtype = dtype.subtype dtype = cast(np.dtype, dtype) elif isinstance(dtype, ExtensionDtype): dtype = np.dtype("object") elif is_dtype_equal(dtype, str): dtype = np.dtype("object") result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) if dtype == np.dtype("object") and na_value is lib.no_default: # much more performant than using to_numpy below for blk in self.blocks: rl = blk.mgr_locs arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1 return result for blk in self.blocks: rl = blk.mgr_locs if blk.is_extension: # Avoid implicit conversion of extension blocks to object # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no # attribute "to_numpy" arr = blk.values.to_numpy( # type: ignore[union-attr] dtype=dtype, na_value=na_value, ) else: arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError("Some items were not contained in blocks") return result # ---------------------------------------------------------------- # Consolidation def is_consolidated(self) -> bool: """ Return True if more than one block with the same dtype """ if not self._known_consolidated: self._consolidate_check() return self._is_consolidated def _consolidate_check(self) -> None: if len(self.blocks) == 1: # fastpath self._is_consolidated = True self._known_consolidated = True return dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate] self._is_consolidated = len(dtypes) == len(set(dtypes)) self._known_consolidated = True def _consolidate_inplace(self) -> None: # In general, _consolidate_inplace should only be called via # DataFrame._consolidate_inplace, otherwise we will fail to invalidate # the DataFrame's _item_cache. The exception is for newly-created # BlockManager objects not yet attached to a DataFrame. 
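# Consolidation merges same-dtype blocks (via _consolidate, which relies on
# _merge_blocks) into larger 2D blocks; blknos/blklocs are then rebuilt so
# that per-column lookups point into the merged blocks.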
if not self.is_consolidated(): self.blocks = _consolidate(self.blocks) self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() class SingleBlockManager(BaseBlockManager, SingleDataManager): """manage a single block with""" def ndim(self) -> Literal[1]: return 1 _is_consolidated = True _known_consolidated = True __slots__ = () is_single_block = True def __init__( self, block: Block, axis: Index, verify_integrity: bool = False, ) -> None: # Assertions disabled for performance # assert isinstance(block, Block), type(block) # assert isinstance(axis, Index), type(axis) self.axes = [axis] self.blocks = (block,) def from_blocks( cls, blocks: list[Block], axes: list[Index], ) -> SingleBlockManager: """ Constructor for BlockManager and SingleBlockManager with same signature. """ assert len(blocks) == 1 assert len(axes) == 1 return cls(blocks[0], axes[0], verify_integrity=False) def from_array( cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None = None ) -> SingleBlockManager: """ Constructor for if we have an array that is not yet a Block. """ block = new_block(array, placement=slice(0, len(index)), ndim=1, refs=refs) return cls(block, index) def to_2d_mgr(self, columns: Index) -> BlockManager: """ Manager analogue of Series.to_frame """ blk = self.blocks[0] arr = ensure_block_shape(blk.values, ndim=2) bp = BlockPlacement(0) new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs) axes = [columns, self.axes[0]] return BlockManager([new_blk], axes=axes, verify_integrity=False) def _has_no_reference(self, i: int = 0) -> bool: """ Check for column `i` if it has references. (whether it references another array or is itself being referenced) Returns True if the column has no references. """ return not self.blocks[0].refs.has_reference() def __getstate__(self): block_values = [b.values for b in self.blocks] block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] axes_array = list(self.axes) extra_state = { "0.14.1": { "axes": axes_array, "blocks": [ {"values": b.values, "mgr_locs": b.mgr_locs.indexer} for b in self.blocks ], } } # First three elements of the state are to maintain forward # compatibility with 0.13.1. return axes_array, block_values, block_items, extra_state def __setstate__(self, state): def unpickle_block(values, mgr_locs, ndim: int) -> Block: # TODO(EA2D): ndim would be unnecessary with 2D EAs # older pickles may store e.g. 
DatetimeIndex instead of DatetimeArray values = extract_array(values, extract_numpy=True) return new_block(values, placement=mgr_locs, ndim=ndim) if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]: state = state[3]["0.14.1"] self.axes = [ensure_index(ax) for ax in state["axes"]] ndim = len(self.axes) self.blocks = tuple( unpickle_block(b["values"], b["mgr_locs"], ndim=ndim) for b in state["blocks"] ) else: raise NotImplementedError("pre-0.14.1 pickles are no longer supported") self._post_setstate() def _post_setstate(self) -> None: pass def _block(self) -> Block: return self.blocks[0] def _blknos(self): """compat with BlockManager""" return None def _blklocs(self): """compat with BlockManager""" return None def getitem_mgr(self, indexer: slice | np.ndarray) -> SingleBlockManager: # similar to get_slice, but not restricted to slice indexer blk = self._block if ( using_copy_on_write() and isinstance(indexer, np.ndarray) and len(indexer) > 0 and com.is_bool_indexer(indexer) and indexer.all() ): return type(self)(blk.copy(deep=False), self.index) array = blk._slice(indexer) if array.ndim > 1: # This will be caught by Series._get_values raise ValueError("dimension-expanding indexing not allowed") bp = BlockPlacement(slice(0, len(array))) # TODO(CoW) in theory only need to track reference if new_array is a view block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs) new_idx = self.index[indexer] return type(self)(block, new_idx) def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager: # Assertion disabled for performance # assert isinstance(slobj, slice), type(slobj) if axis >= self.ndim: raise IndexError("Requested axis not found in manager") blk = self._block array = blk._slice(slobj) bp = BlockPlacement(slice(0, len(array))) # TODO this method is only used in groupby SeriesSplitter at the moment, # so passing refs is not yet covered by the tests block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs) new_index = self.index._getitem_slice(slobj) return type(self)(block, new_index) def index(self) -> Index: return self.axes[0] def dtype(self) -> DtypeObj: return self._block.dtype def get_dtypes(self) -> np.ndarray: return np.array([self._block.dtype]) def external_values(self): """The array that Series.values returns""" return self._block.external_values() def internal_values(self): """The array that Series._values returns""" return self._block.values def array_values(self): """The array that Series.array returns""" return self._block.array_values def get_numeric_data(self, copy: bool = False): if self._block.is_numeric: return self.copy(deep=copy) return self.make_empty() def _can_hold_na(self) -> bool: return self._block._can_hold_na def setitem_inplace(self, indexer, value) -> None: """ Set values with indexer. For Single[Block/Array]Manager, this backs s[indexer] = value This is an inplace version of `setitem()`, mutating the manager/values in place, not returning a new Manager (and Block), and thus never changing the dtype. """ if using_copy_on_write() and not self._has_no_reference(0): self.blocks = (self._block.copy(),) self._cache.clear() super().setitem_inplace(indexer, value) def idelete(self, indexer) -> SingleBlockManager: """ Delete single location from SingleBlockManager. Ensures that self.blocks doesn't become empty. 
""" nb = self._block.delete(indexer)[0] self.blocks = (nb,) self.axes[0] = self.axes[0].delete(indexer) self._cache.clear() return self def fast_xs(self, loc): """ fast path for getting a cross-section return a view of the data """ raise NotImplementedError("Use series._values[loc] instead") def set_values(self, values: ArrayLike) -> None: """ Set the values of the single block in place. Use at your own risk! This does not check if the passed values are valid for the current Block/SingleBlockManager (length, dtype, etc). """ # TODO(CoW) do we need to handle copy on write here? Currently this is # only used for FrameColumnApply.series_generator (what if apply is # mutating inplace?) self.blocks[0].values = values self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values))) def _equal_values(self: T, other: T) -> bool: """ Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked. """ # For SingleBlockManager (i.e.Series) if other.ndim != 1: return False left = self.blocks[0].values right = other.blocks[0].values return array_equals(left, right) The provided code snippet includes necessary dependencies for implementing the `mgr_to_mgr` function. Write a Python function `def mgr_to_mgr(mgr, typ: str, copy: bool = True)` to solve the following problem: Convert to specific type of Manager. Does not copy if the type is already correct. Does not guarantee a copy otherwise. `copy` keyword only controls whether conversion from Block->ArrayManager copies the 1D arrays. Here is the function: def mgr_to_mgr(mgr, typ: str, copy: bool = True): """ Convert to specific type of Manager. Does not copy if the type is already correct. Does not guarantee a copy otherwise. `copy` keyword only controls whether conversion from Block->ArrayManager copies the 1D arrays. """ new_mgr: Manager if typ == "block": if isinstance(mgr, BlockManager): new_mgr = mgr else: if mgr.ndim == 2: new_mgr = arrays_to_mgr( mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block" ) else: new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index) elif typ == "array": if isinstance(mgr, ArrayManager): new_mgr = mgr else: if mgr.ndim == 2: arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))] if copy: arrays = [arr.copy() for arr in arrays] new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]]) else: array = mgr.internal_values() if copy: array = array.copy() new_mgr = SingleArrayManager([array], [mgr.index]) else: raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") return new_mgr
Convert to a specific type of Manager. Does not copy if the type is already correct, and does not guarantee a copy otherwise. The `copy` keyword only controls whether the Block->ArrayManager conversion copies the 1D arrays.
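A minimal usage sketch (an illustration, not part of the original snippet): it assumes a pandas 2.x build in which the ArrayManager backend is still available, and it reaches for the internal `DataFrame._mgr` attribute purely for demonstration.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})

block_mgr = df._mgr                          # a BlockManager by default
array_mgr = mgr_to_mgr(block_mgr, "array")   # Block -> Array; copy=True copies the 1D arrays
round_trip = mgr_to_mgr(array_mgr, "block")  # back to a BlockManager

# Requesting the type the manager already has returns it unchanged
assert mgr_to_mgr(block_mgr, "block") is block_mgr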
173,091
from __future__ import annotations from collections import abc from typing import ( Any, Hashable, Sequence, ) import numpy as np from numpy import ma from pandas._libs import lib from pandas._typing import ( ArrayLike, DtypeObj, Manager, npt, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core import ( algorithms, common as com, ) from pandas.core.arrays import ( BooleanArray, ExtensionArray, FloatingArray, IntegerArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, sanitize_array, ) from pandas.core.indexes.api import ( DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, union_indexes, ) from pandas.core.internals.array_manager import ( ArrayManager, SingleArrayManager, ) from pandas.core.internals.blocks import ( BlockPlacement, ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, create_block_manager_from_blocks, create_block_manager_from_column_arrays, ) def arrays_to_mgr( arrays, columns: Index, index, *, dtype: DtypeObj | None = None, verify_integrity: bool = True, typ: str | None = None, consolidate: bool = True, ) -> Manager: """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if verify_integrity: # figure out the index, if necessary if index is None: index = _extract_index(arrays) else: index = ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays, refs = _homogenize(arrays, index, dtype) # _homogenize ensures # - all(len(x) == len(index) for x in arrays) # - all(x.ndim == 1 for x in arrays) # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) # - all(type(x) is not PandasArray for x in arrays) else: index = ensure_index(index) arrays = [extract_array(x, extract_numpy=True) for x in arrays] # with _from_arrays, the passed arrays should never be Series objects refs = [None] * len(arrays) # Reached via DataFrame._from_arrays; we do minimal validation here for arr in arrays: if ( not isinstance(arr, (np.ndarray, ExtensionArray)) or arr.ndim != 1 or len(arr) != len(index) ): raise ValueError( "Arrays must be 1-dimensional np.ndarray or ExtensionArray " "with length matching len(index)" ) columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(arrays) must match len(columns)") # from BlockManager perspective axes = [columns, index] if typ == "block": return create_block_manager_from_column_arrays( arrays, axes, consolidate=consolidate, refs=refs ) elif typ == "array": return ArrayManager(arrays, [index, columns]) else: raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") def _check_values_indices_shape_match( values: np.ndarray, index: Index, columns: Index ) -> None: """ Check that the shape implied by our axes matches the actual shape of the data. 
""" if values.shape[1] != len(columns) or values.shape[0] != len(index): # Could let this raise in Block constructor, but we get a more # helpful exception message this way. if values.shape[0] == 0: raise ValueError("Empty data passed with indices specified.") passed = values.shape implied = (len(index), len(columns)) raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray: # values is specifically _not_ ndarray, EA, Index, or Series # We only get here with `not treat_as_nested(values)` if len(values) == 0: # TODO: check for length-zero range, in which case return int64 dtype? # TODO: re-use anything in try_cast? return np.empty((0, 0), dtype=object) elif isinstance(values, range): arr = range_to_ndarray(values) return arr[..., np.newaxis] def convert(v): if not is_list_like(v) or isinstance(v, ABCDataFrame): return v v = extract_array(v, extract_numpy=True) res = maybe_convert_platform(v) # We don't do maybe_infer_to_datetimelike here bc we will end up doing # it column-by-column in ndarray_to_mgr return res # we could have a 1-dim or 2-dim list here # this is equiv of np.asarray, but does object conversion # and platform dtype preservation # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like # np.asarray would if is_list_like(values[0]): values = np.array([convert(v) for v in values]) elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: # GH#21861 see test_constructor_list_of_lists values = np.array([convert(v) for v in values]) else: values = convert(values) return _ensure_2d(values) def _ensure_2d(values: np.ndarray) -> np.ndarray: """ Reshape 1D values, raise on anything else other than 2D. """ if values.ndim == 1: values = values.reshape((values.shape[0], 1)) elif values.ndim != 2: raise ValueError(f"Must pass 2-d input. shape={values.shape}") return values def _get_axes( N: int, K: int, index: Index | None, columns: Index | None ) -> tuple[Index, Index]: # helper to create the axes as indexes # return axes or defaults if index is None: index = default_index(N) else: index = ensure_index(index) if columns is None: columns = default_index(K) else: columns = ensure_index(columns) return index, columns DtypeObj = Union[np.dtype, "ExtensionDtype"] Manager = Union[ "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager" ] def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool: """Checks if astype avoided copying the data. Parameters ---------- dtype : Original dtype new_dtype : target dtype Returns ------- True if new data is a view or not guaranteed to be a copy, False otherwise """ if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype): new_dtype, dtype = dtype, new_dtype if dtype == new_dtype: return True elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype): # Only equal numpy dtypes avoid a copy return False elif is_string_dtype(dtype) and is_string_dtype(new_dtype): # Potentially! 
a view when converting from object to string return True elif is_object_dtype(dtype) and new_dtype.kind == "O": # When the underlying array has dtype object, we don't have to make a copy return True elif dtype.kind in "mM" and new_dtype.kind in "mM": dtype = getattr(dtype, "numpy_dtype", dtype) new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype) return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None) numpy_dtype = getattr(dtype, "numpy_dtype", None) new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None) if numpy_dtype is None and isinstance(dtype, np.dtype): numpy_dtype = dtype if new_numpy_dtype is None and isinstance(new_dtype, np.dtype): new_numpy_dtype = new_dtype if numpy_dtype is not None and new_numpy_dtype is not None: # if both have NumPy dtype or one of them is a numpy dtype # they are only a view when the numpy dtypes are equal, e.g. # int64 -> Int64 or int64[pyarrow] # int64 -> Int32 copies return numpy_dtype == new_numpy_dtype # Assume this is a view since we don't know for sure if a copy was made return True def maybe_infer_to_datetimelike( value: npt.NDArray[np.object_], ) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray: """ we might have a array (or single object) that is datetime like, and no dtype is passed don't change the value unless we find a datetime/timedelta set this is pretty strict in that a datetime/timedelta is REQUIRED in addition to possible nulls/string likes Parameters ---------- value : np.ndarray[object] Returns ------- np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray """ if not isinstance(value, np.ndarray) or value.dtype != object: # Caller is responsible for passing only ndarray[object] raise TypeError(type(value)) # pragma: no cover if value.ndim != 1: # Caller is responsible raise ValueError(value.ndim) # pragma: no cover if not len(value): return value # error: Incompatible return value type (got "Union[ExtensionArray, # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray, # TimedeltaArray, PeriodArray, IntervalArray]") return lib.maybe_convert_objects( # type: ignore[return-value] value, # Here we do not convert numeric dtypes, as if we wanted that, # numpy would have done it for us. convert_numeric=False, convert_period=True, convert_interval=True, convert_timedelta=True, convert_datetime=True, dtype_if_all_nat=np.dtype("M8[ns]"), ) def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_dtype_equal(source, target) -> bool: """ Check if two dtypes are equal. Parameters ---------- source : The first dtype to compare target : The second dtype to compare Returns ------- boolean Whether or not the two dtypes are equal. 
Examples -------- >>> is_dtype_equal(int, float) False >>> is_dtype_equal("int", int) True >>> is_dtype_equal(object, "category") False >>> is_dtype_equal(CategoricalDtype(), "category") True >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") False """ if isinstance(target, str): if not isinstance(source, str): # GH#38516 ensure we get the same behavior from # is_dtype_equal(CDT, "category") and CDT == "category" try: src = get_dtype(source) if isinstance(src, ExtensionDtype): return src == target except (TypeError, AttributeError, ImportError): return False elif isinstance(source, str): return is_dtype_equal(target, source) try: source = get_dtype(source) target = get_dtype(target) return source == target except (TypeError, AttributeError, ImportError): # invalid comparison # object == category will hit this return False def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a timedelta64 or datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a timedelta64, or datetime64 dtype. Examples -------- >>> is_datetime_or_timedelta_dtype(str) False >>> is_datetime_or_timedelta_dtype(int) False >>> is_datetime_or_timedelta_dtype(np.datetime64) True >>> is_datetime_or_timedelta_dtype(np.timedelta64) True >>> is_datetime_or_timedelta_dtype(np.array(['a', 'b'])) False >>> is_datetime_or_timedelta_dtype(pd.Series([1, 2])) False >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64)) True >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64)) True """ return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64)) def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool: """ Analogue to is_extension_array_dtype but excluding DatetimeTZDtype. """ # Note: if other EA dtypes are ever held in HybridBlock, exclude those # here too. # NB: need to check DatetimeTZDtype and not is_datetime64tz_dtype # to exclude ArrowTimestampUSDtype return isinstance(dtype, ExtensionDtype) and not isinstance( dtype, (DatetimeTZDtype, PeriodDtype) ) def is_extension_array_dtype(arr_or_dtype) -> bool: """ Check if an object is a pandas extension array type. See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object For array-like input, the ``.dtype`` attribute will be extracted. Returns ------- bool Whether the `arr_or_dtype` is an extension array type. Notes ----- This checks whether an object implements the pandas extension array interface. In pandas, this includes: * Categorical * Sparse * Interval * Period * DatetimeArray * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. Examples -------- >>> from pandas.api.types import is_extension_array_dtype >>> arr = pd.Categorical(['a', 'b']) >>> is_extension_array_dtype(arr) True >>> is_extension_array_dtype(arr.dtype) True >>> arr = np.array(['a', 'b']) >>> is_extension_array_dtype(arr.dtype) False """ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) if isinstance(dtype, ExtensionDtype): return True elif isinstance(dtype, np.dtype): return False else: return registry.find(dtype) is not None ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) def extract_array( obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... ) -> ArrayLike: ... 
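# (typing overloads: the Series/Index signature above narrows the return type
#  to ArrayLike; the generic signature below passes other objects through)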
def extract_array( obj: T, extract_numpy: bool = ..., extract_range: bool = ... ) -> T | ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = False, extract_range: bool = False ) -> T | ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray. extract_range : bool, default False If we have a RangeIndex, return range._values if True (which is a materialized integer ndarray), otherwise return unchanged. Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index the ndarray is returned. >>> extract_array(pd.Series([1, 2, 3])) array([1, 2, 3]) To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndex, ABCSeries)): if isinstance(obj, ABCRangeIndex): if extract_range: return obj._values # https://github.com/python/mypy/issues/1081 # error: Incompatible return value type (got "RangeIndex", expected # "Union[T, Union[ExtensionArray, ndarray[Any, Any]]]") return obj # type: ignore[return-value] return obj._values elif extract_numpy and isinstance(obj, ABCPandasArray): return obj.to_numpy() return obj def ensure_wrapped_if_datetimelike(arr): """ Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray. """ if isinstance(arr, np.ndarray): if arr.dtype.kind == "M": from pandas.core.arrays import DatetimeArray return DatetimeArray._from_sequence(arr) elif arr.dtype.kind == "m": from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(arr) return arr def sanitize_array( data, index: Index | None, dtype: DtypeObj | None = None, copy: bool = False, *, allow_2d: bool = False, ) -> ArrayLike: """ Sanitize input data to an ndarray or ExtensionArray, copy if specified, coerce to the dtype if specified. Parameters ---------- data : Any index : Index or None, default None dtype : np.dtype, ExtensionDtype, or None, default None copy : bool, default False allow_2d : bool, default False If False, raise if we have a 2D Arraylike. 
Returns ------- np.ndarray or ExtensionArray """ if isinstance(data, ma.MaskedArray): data = sanitize_masked_array(data) if isinstance(dtype, PandasDtype): # Avoid ending up with a PandasArray dtype = dtype.numpy_dtype # extract ndarray or ExtensionArray, ensure we have no PandasArray data = extract_array(data, extract_numpy=True, extract_range=True) if isinstance(data, np.ndarray) and data.ndim == 0: if dtype is None: dtype = data.dtype data = lib.item_from_zerodim(data) elif isinstance(data, range): # GH#16804 data = range_to_ndarray(data) copy = False if not is_list_like(data): if index is None: raise ValueError("index must be specified when data is not list-like") data = construct_1d_arraylike_from_scalar(data, len(index), dtype) return data elif isinstance(data, ABCExtensionArray): # it is already ensured above this is not a PandasArray # Until GH#49309 is fixed this check needs to come before the # ExtensionDtype check if dtype is not None: subarr = data.astype(dtype, copy=copy) elif copy: subarr = data.copy() else: subarr = data elif isinstance(dtype, ExtensionDtype): # create an extension array from its dtype _sanitize_non_ordered(data) cls = dtype.construct_array_type() subarr = cls._from_sequence(data, dtype=dtype, copy=copy) # GH#846 elif isinstance(data, np.ndarray): if isinstance(data, np.matrix): data = data.A if dtype is None: subarr = data if data.dtype == object: subarr = maybe_infer_to_datetimelike(data) if subarr is data and copy: subarr = subarr.copy() else: # we will try to copy by-definition here subarr = _try_cast(data, dtype, copy) elif hasattr(data, "__array__"): # e.g. dask array GH#38645 data = np.array(data, copy=copy) return sanitize_array( data, index=index, dtype=dtype, copy=False, allow_2d=allow_2d, ) else: _sanitize_non_ordered(data) # materialize e.g. generators, convert e.g. tuples, abc.ValueView data = list(data) if len(data) == 0 and dtype is None: # We default to float64, matching numpy subarr = np.array([], dtype=np.float64) elif dtype is not None: subarr = _try_cast(data, dtype, copy) else: subarr = maybe_convert_platform(data) if subarr.dtype == object: subarr = cast(np.ndarray, subarr) subarr = maybe_infer_to_datetimelike(subarr) subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d) if isinstance(subarr, np.ndarray): # at this point we should have dtype be None or subarr.dtype == dtype dtype = cast(np.dtype, dtype) subarr = _sanitize_str_dtypes(subarr, data, dtype, copy) return subarr class ArrayManager(BaseArrayManager): def ndim(self) -> Literal[2]: return 2 def __init__( self, arrays: list[np.ndarray | ExtensionArray], axes: list[Index], verify_integrity: bool = True, ) -> None: # Note: we are storing the axes in "_axes" in the (row, columns) order # which contrasts the order how it is stored in BlockManager self._axes = axes self.arrays = arrays if verify_integrity: self._axes = [ensure_index(ax) for ax in axes] arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays] self.arrays = [maybe_coerce_values(arr) for arr in arrays] self._verify_integrity() def _verify_integrity(self) -> None: n_rows, n_columns = self.shape_proper if not len(self.arrays) == n_columns: raise ValueError( "Number of passed arrays must equal the size of the column Index: " f"{len(self.arrays)} arrays vs {n_columns} columns." 
) for arr in self.arrays: if not len(arr) == n_rows: raise ValueError( "Passed arrays should have the same length as the rows Index: " f"{len(arr)} vs {n_rows} rows" ) if not isinstance(arr, (np.ndarray, ExtensionArray)): raise ValueError( "Passed arrays should be np.ndarray or ExtensionArray instances, " f"got {type(arr)} instead" ) if not arr.ndim == 1: raise ValueError( "Passed arrays should be 1-dimensional, got array with " f"{arr.ndim} dimensions instead." ) # -------------------------------------------------------------------- # Indexing def fast_xs(self, loc: int) -> SingleArrayManager: """ Return the array corresponding to `frame.iloc[loc]`. Parameters ---------- loc : int Returns ------- np.ndarray or ExtensionArray """ dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) values = [arr[loc] for arr in self.arrays] if isinstance(dtype, ExtensionDtype): result = dtype.construct_array_type()._from_sequence(values, dtype=dtype) # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT elif is_datetime64_ns_dtype(dtype): result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray elif is_timedelta64_ns_dtype(dtype): result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray else: result = np.array(values, dtype=dtype) return SingleArrayManager([result], [self._axes[1]]) def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager: axis = self._normalize_axis(axis) if axis == 0: arrays = [arr[slobj] for arr in self.arrays] elif axis == 1: arrays = self.arrays[slobj] new_axes = list(self._axes) new_axes[axis] = new_axes[axis]._getitem_slice(slobj) return type(self)(arrays, new_axes, verify_integrity=False) def iget(self, i: int) -> SingleArrayManager: """ Return the data as a SingleArrayManager. """ values = self.arrays[i] return SingleArrayManager([values], [self._axes[0]]) def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). """ return self.arrays[i] def column_arrays(self) -> list[ArrayLike]: """ Used in the JSON C code to access column arrays. """ return [np.asarray(arr) for arr in self.arrays] def iset( self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False ) -> None: """ Set new column(s). This changes the ArrayManager in-place, but replaces (an) existing column(s), not changing column values in-place). Parameters ---------- loc : integer, slice or boolean mask Positional location (already bounds checked) value : np.ndarray or ExtensionArray inplace : bool, default False Whether overwrite existing array as opposed to replacing it. """ # single column -> single integer index if lib.is_integer(loc): # TODO can we avoid needing to unpack this here? 
That means converting # DataFrame into 1D array when loc is an integer if isinstance(value, np.ndarray) and value.ndim == 2: assert value.shape[1] == 1 value = value[:, 0] # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item # but we should avoid that and pass directly the proper array value = maybe_coerce_values(value) assert isinstance(value, (np.ndarray, ExtensionArray)) assert value.ndim == 1 assert len(value) == len(self._axes[0]) self.arrays[loc] = value return # multiple columns -> convert slice or array to integer indices elif isinstance(loc, slice): indices = range( loc.start if loc.start is not None else 0, loc.stop if loc.stop is not None else self.shape_proper[1], loc.step if loc.step is not None else 1, ) else: assert isinstance(loc, np.ndarray) assert loc.dtype == "bool" # error: Incompatible types in assignment (expression has type "ndarray", # variable has type "range") indices = np.nonzero(loc)[0] # type: ignore[assignment] assert value.ndim == 2 assert value.shape[0] == len(self._axes[0]) for value_idx, mgr_idx in enumerate(indices): # error: No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, int]" value_arr = value[:, value_idx] # type: ignore[call-overload] self.arrays[mgr_idx] = value_arr return def column_setitem( self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False ) -> None: """ Set values ("setitem") into a single column (not setting the full column). This is a method on the ArrayManager level, to avoid creating an intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) """ if not is_integer(loc): raise TypeError("The column index should be an integer") arr = self.arrays[loc] mgr = SingleArrayManager([arr], [self._axes[0]]) if inplace_only: mgr.setitem_inplace(idx, value) else: new_mgr = mgr.setitem((idx,), value) # update existing ArrayManager in-place self.arrays[loc] = new_mgr.arrays[0] def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray """ # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) value = extract_array(value, extract_numpy=True) if value.ndim == 2: if value.shape[0] == 1: # error: No overload variant of "__getitem__" of "ExtensionArray" # matches argument type "Tuple[int, slice]" value = value[0, :] # type: ignore[call-overload] else: raise ValueError( f"Expected a 1D array, got an array with shape {value.shape}" ) value = maybe_coerce_values(value) # TODO self.arrays can be empty # assert len(value) == len(self.arrays[0]) # TODO is this copy needed? arrays = self.arrays.copy() arrays.insert(loc, value) self.arrays = arrays self._axes[1] = new_axis def idelete(self, indexer) -> ArrayManager: """ Delete selected locations in-place (new block and array, same BlockManager) """ to_keep = np.ones(self.shape[0], dtype=np.bool_) to_keep[indexer] = False self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] self._axes = [self._axes[0], self._axes[1][to_keep]] return self # -------------------------------------------------------------------- # Array-wise Operation def grouped_reduce(self: T, func: Callable) -> T: """ Apply grouped reduction function columnwise, returning a new ArrayManager. 
Parameters ---------- func : grouped reduction function Returns ------- ArrayManager """ result_arrays: list[np.ndarray] = [] result_indices: list[int] = [] for i, arr in enumerate(self.arrays): # grouped_reduce functions all expect 2D arrays arr = ensure_block_shape(arr, ndim=2) res = func(arr) if res.ndim == 2: # reverse of ensure_block_shape assert res.shape[0] == 1 res = res[0] result_arrays.append(res) result_indices.append(i) if len(result_arrays) == 0: nrows = 0 else: nrows = result_arrays[0].shape[0] index = Index(range(nrows)) columns = self.items # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] def reduce(self: T, func: Callable) -> T: """ Apply reduction function column-wise, returning a single-row ArrayManager. Parameters ---------- func : reduction function Returns ------- ArrayManager """ result_arrays: list[np.ndarray] = [] for i, arr in enumerate(self.arrays): res = func(arr, axis=0) # TODO NaT doesn't preserve dtype, so we need to ensure to create # a timedelta result array if original was timedelta # what if datetime results in timedelta? (eg std) dtype = arr.dtype if res is NaT else None result_arrays.append( sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type] ) index = Index._simple_new(np.array([None], dtype=object)) # placeholder columns = self.items # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] return new_mgr def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ # TODO what if `other` is BlockManager ? left_arrays = self.arrays right_arrays = other.arrays result_arrays = [ array_op(left, right) for left, right in zip(left_arrays, right_arrays) ] return type(self)(result_arrays, self._axes) def quantile( self, *, qs: Index, # with dtype float64 axis: AxisInt = 0, transposed: bool = False, interpolation: QuantileInterpolation = "linear", ) -> ArrayManager: arrs = [ensure_block_shape(x, 2) for x in self.arrays] assert axis == 1 new_arrs = [ quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs ] for i, arr in enumerate(new_arrs): if arr.ndim == 2: assert arr.shape[0] == 1, arr.shape new_arrs[i] = arr[0] axes = [qs, self._axes[1]] return type(self)(new_arrs, axes) # ---------------------------------------------------------------- def unstack(self, unstacker, fill_value) -> ArrayManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. 
Returns ------- unstacked : BlockManager """ indexer, _ = unstacker._indexer_and_to_sort if unstacker.mask.all(): new_indexer = indexer allow_fill = False new_mask2D = None needs_masking = None else: new_indexer = np.full(unstacker.mask.shape, -1) new_indexer[unstacker.mask] = indexer allow_fill = True # calculating the full mask once and passing it to take_1d is faster # than letting take_1d calculate it in each repeated call new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) new_indexer2D = new_indexer.reshape(*unstacker.full_shape) new_indexer2D = ensure_platform_int(new_indexer2D) new_arrays = [] for arr in self.arrays: for i in range(unstacker.full_shape[1]): if allow_fill: # error: Value of type "Optional[Any]" is not indexable [index] new_arr = take_1d( arr, new_indexer2D[:, i], allow_fill=needs_masking[i], # type: ignore[index] fill_value=fill_value, mask=new_mask2D[:, i], # type: ignore[index] ) else: new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False) new_arrays.append(new_arr) new_index = unstacker.new_index new_columns = unstacker.get_new_columns(self._axes[1]) new_axes = [new_index, new_columns] return type(self)(new_arrays, new_axes, verify_integrity=False) def as_array( self, dtype=None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the blockmanager data into an numpy array. Parameters ---------- dtype : object, default None Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of False does not guarantee that the underlying data is not copied. na_value : object, default lib.no_default Value to be used as the missing value sentinel. Returns ------- arr : ndarray """ if len(self.arrays) == 0: empty_arr = np.empty(self.shape, dtype=float) return empty_arr.transpose() # We want to copy when na_value is provided to avoid # mutating the original object copy = copy or na_value is not lib.no_default if not dtype: dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) if isinstance(dtype, SparseDtype): dtype = dtype.subtype elif isinstance(dtype, PandasDtype): dtype = dtype.numpy_dtype elif is_extension_array_dtype(dtype): dtype = "object" elif is_dtype_equal(dtype, str): dtype = "object" result = np.empty(self.shape_proper, dtype=dtype) for i, arr in enumerate(self.arrays): arr = arr.astype(dtype, copy=copy) result[:, i] = arr if na_value is not lib.no_default: result[isna(result)] = na_value return result def new_block_2d( values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None = None ): # new_block specialized to case with # ndim=2 # isinstance(placement, BlockPlacement) # check_ndim/ensure_block_shape already checked klass = get_block_type(values.dtype) values = maybe_coerce_values(values) return klass(values, ndim=2, placement=placement, refs=refs) def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: """ Reshape if possible to have values.ndim == ndim. """ if values.ndim < ndim: if not is_1d_only_ea_dtype(values.dtype): # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. 
values = cast("np.ndarray | DatetimeArray | TimedeltaArray", values) values = values.reshape(1, -1) return values def create_block_manager_from_blocks( blocks: list[Block], axes: list[Index], consolidate: bool = True, verify_integrity: bool = True, ) -> BlockManager: # If verify_integrity=False, then caller is responsible for checking # all(x.shape[-1] == len(axes[1]) for x in blocks) # sum(x.shape[0] for x in blocks) == len(axes[0]) # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0]))) # all(blk.ndim == 2 for blk in blocks) # This allows us to safely pass verify_integrity=False try: mgr = BlockManager(blocks, axes, verify_integrity=verify_integrity) except ValueError as err: arrays = [blk.values for blk in blocks] tot_items = sum(arr.shape[0] for arr in arrays) raise_construction_error(tot_items, arrays[0].shape[1:], axes, err) if consolidate: mgr._consolidate_inplace() return mgr def ndarray_to_mgr( values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str ) -> Manager: # used in DataFrame.__init__ # input must be a ndarray, list, Series, Index, ExtensionArray if isinstance(values, ABCSeries): if columns is None: if values.name is not None: columns = Index([values.name]) if index is None: index = values.index else: values = values.reindex(index) # zero len case (GH #2234) if not len(values) and columns is not None and len(columns): values = np.empty((0, 1), dtype=object) # if the array preparation does a copy -> avoid this for ArrayManager, # since the copy is done on conversion to 1D arrays copy_on_sanitize = False if typ == "array" else copy vdtype = getattr(values, "dtype", None) refs = None if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype): # GH#19157 if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1: # GH#12513 a EA dtype passed with a 2D array, split into # multiple EAs that view the values # error: No overload variant of "__getitem__" of "ExtensionArray" # matches argument type "Tuple[slice, int]" values = [ values[:, n] # type: ignore[call-overload] for n in range(values.shape[1]) ] else: values = [values] if columns is None: columns = Index(range(len(values))) else: columns = ensure_index(columns) return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ) elif is_extension_array_dtype(vdtype): # i.e. 
Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype) # are already caught above values = extract_array(values, extract_numpy=True) if copy: values = values.copy() if values.ndim == 1: values = values.reshape(-1, 1) elif isinstance(values, (ABCSeries, Index)): if not copy_on_sanitize and ( dtype is None or astype_is_view(values.dtype, dtype) ): refs = values._references if copy_on_sanitize: values = values._values.copy() else: values = values._values values = _ensure_2d(values) elif isinstance(values, (np.ndarray, ExtensionArray)): # drop subclass info _copy = ( copy_on_sanitize if (dtype is None or astype_is_view(values.dtype, dtype)) else False ) values = np.array(values, copy=_copy) values = _ensure_2d(values) else: # by definition an array here # the dtypes will be coerced to a single dtype values = _prep_ndarraylike(values, copy=copy_on_sanitize) if dtype is not None and not is_dtype_equal(values.dtype, dtype): # GH#40110 see similar check inside sanitize_array values = sanitize_array( values, None, dtype=dtype, copy=copy_on_sanitize, allow_2d=True, ) # _prep_ndarraylike ensures that values.ndim == 2 at this point index, columns = _get_axes( values.shape[0], values.shape[1], index=index, columns=columns ) _check_values_indices_shape_match(values, index, columns) if typ == "array": if issubclass(values.dtype.type, str): values = np.array(values, dtype=object) if dtype is None and is_object_dtype(values.dtype): arrays = [ ensure_wrapped_if_datetimelike( maybe_infer_to_datetimelike(values[:, i]) ) for i in range(values.shape[1]) ] else: if is_datetime_or_timedelta_dtype(values.dtype): values = ensure_wrapped_if_datetimelike(values) arrays = [values[:, i] for i in range(values.shape[1])] if copy: arrays = [arr.copy() for arr in arrays] return ArrayManager(arrays, [index, columns], verify_integrity=False) values = values.T # if we don't have a dtype specified, then try to convert objects # on the entire block; this is to convert if we have datetimelike's # embedded in an object type if dtype is None and is_object_dtype(values.dtype): obj_columns = list(values) maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns] # don't convert (and copy) the objects if no type inference occurs if any(x is not y for x, y in zip(obj_columns, maybe_datetime)): dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime] block_values = [ new_block_2d(dvals_list[n], placement=BlockPlacement(n)) for n in range(len(dvals_list)) ] else: bp = BlockPlacement(slice(len(columns))) nb = new_block_2d(values, placement=bp, refs=refs) block_values = [nb] else: bp = BlockPlacement(slice(len(columns))) nb = new_block_2d(values, placement=bp, refs=refs) block_values = [nb] if len(columns) == 0: # TODO: check len(values) == 0? block_values = [] return create_block_manager_from_blocks( block_values, [columns, index], verify_integrity=False )
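The tail of ndarray_to_mgr above decides whether the 2D values end up as one consolidated block spanning all columns or as one block per column (when object data infers to datetimelike). The sketch below is illustrative only: it inspects the private `_mgr` attribute and assumes the default block-based manager, so treat the printed block counts as typical rather than guaranteed.

from datetime import datetime

import numpy as np
import pandas as pd

# Homogeneous float input: a single block covers all columns, matching
# BlockPlacement(slice(len(columns))) in the code above.
homogeneous = pd.DataFrame(np.arange(6.0).reshape(3, 2))
print(homogeneous._mgr.nblocks)  # typically 1

# Object input where datetime-like inference fires for one column:
# the inferred values are split into per-column blocks.
obj = np.array(
    [[datetime(2020, 1, 1), 1], [datetime(2020, 1, 2), 2]], dtype=object
)
inferred = pd.DataFrame(obj)
print(inferred.dtypes.tolist())  # typically [datetime64[ns], object]
print(inferred._mgr.nblocks)     # typically 2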
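The next module constructs managers from column-wise inputs. Before it, a short illustrative sketch (public API only) of the index-inference rules enforced by _extract_index below; the error wording is taken from the code that follows, and outputs should be read as typical.

import pandas as pd

# All-scalar input: an explicit index is required.
try:
    pd.DataFrame({"a": 1})
except ValueError as err:
    print(err)  # If using all scalar values, you must pass an index

# Series inputs contribute their indexes, which are unioned.
s1 = pd.Series([1, 2], index=["x", "y"])
s2 = pd.Series([3, 4], index=["y", "z"])
print(pd.DataFrame({"a": s1, "b": s2}).index.tolist())  # ['x', 'y', 'z']

# Raw arrays of mismatched lengths are rejected outright.
try:
    pd.DataFrame({"a": [1, 2], "b": [1, 2, 3]})
except ValueError as err:
    print(err)  # All arrays must be of the same length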
from __future__ import annotations from collections import abc from typing import ( Any, Hashable, Sequence, ) import numpy as np from numpy import ma from pandas._libs import lib from pandas._typing import ( ArrayLike, DtypeObj, Manager, npt, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core import ( algorithms, common as com, ) from pandas.core.arrays import ( BooleanArray, ExtensionArray, FloatingArray, IntegerArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, sanitize_array, ) from pandas.core.indexes.api import ( DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, union_indexes, ) from pandas.core.internals.array_manager import ( ArrayManager, SingleArrayManager, ) from pandas.core.internals.blocks import ( BlockPlacement, ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, create_block_manager_from_blocks, create_block_manager_from_column_arrays, ) def arrays_to_mgr( arrays, columns: Index, index, *, dtype: DtypeObj | None = None, verify_integrity: bool = True, typ: str | None = None, consolidate: bool = True, ) -> Manager: """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if verify_integrity: # figure out the index, if necessary if index is None: index = _extract_index(arrays) else: index = ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays, refs = _homogenize(arrays, index, dtype) # _homogenize ensures # - all(len(x) == len(index) for x in arrays) # - all(x.ndim == 1 for x in arrays) # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) # - all(type(x) is not PandasArray for x in arrays) else: index = ensure_index(index) arrays = [extract_array(x, extract_numpy=True) for x in arrays] # with _from_arrays, the passed arrays should never be Series objects refs = [None] * len(arrays) # Reached via DataFrame._from_arrays; we do minimal validation here for arr in arrays: if ( not isinstance(arr, (np.ndarray, ExtensionArray)) or arr.ndim != 1 or len(arr) != len(index) ): raise ValueError( "Arrays must be 1-dimensional np.ndarray or ExtensionArray " "with length matching len(index)" ) columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(arrays) must match len(columns)") # from BlockManager perspective axes = [columns, index] if typ == "block": return create_block_manager_from_column_arrays( arrays, axes, consolidate=consolidate, refs=refs ) elif typ == "array": return ArrayManager(arrays, [index, columns]) else: raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") def _extract_index(data) -> Index: """ Try to infer an Index from the passed data, raise ValueError on failure. 
""" index: Index if len(data) == 0: return default_index(0) raw_lengths = [] indexes: list[list[Hashable] | Index] = [] have_raw_arrays = False have_series = False have_dicts = False for val in data: if isinstance(val, ABCSeries): have_series = True indexes.append(val.index) elif isinstance(val, dict): have_dicts = True indexes.append(list(val.keys())) elif is_list_like(val) and getattr(val, "ndim", 1) == 1: have_raw_arrays = True raw_lengths.append(len(val)) elif isinstance(val, np.ndarray) and val.ndim > 1: raise ValueError("Per-column arrays must each be 1-dimensional") if not indexes and not raw_lengths: raise ValueError("If using all scalar values, you must pass an index") if have_series: index = union_indexes(indexes) elif have_dicts: index = union_indexes(indexes, sort=False) if have_raw_arrays: lengths = list(set(raw_lengths)) if len(lengths) > 1: raise ValueError("All arrays must be of the same length") if have_dicts: raise ValueError( "Mixing dicts with non-Series may lead to ambiguous ordering." ) if have_series: if lengths[0] != len(index): msg = ( f"array length {lengths[0]} does not match index " f"length {len(index)}" ) raise ValueError(msg) else: index = default_index(lengths[0]) return ensure_index(index) Any = object() class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... DtypeObj = Union[np.dtype, "ExtensionDtype"] Manager = Union[ "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager" ] def construct_1d_arraylike_from_scalar( value: Scalar, length: int, dtype: DtypeObj | None ) -> ArrayLike: """ create a np.ndarray / pandas type of specified shape and dtype filled with values Parameters ---------- value : scalar value length : int dtype : pandas_dtype or np.dtype Returns ------- np.ndarray / pandas type of length, filled with value """ if dtype is None: try: dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True) except OutOfBoundsDatetime: dtype = _dtype_obj if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() seq = [] if length == 0 else [value] subarr = cls._from_sequence(seq, dtype=dtype).repeat(length) else: if length and is_integer_dtype(dtype) and isna(value): # coerce if we have nan for an integer dtype dtype = np.dtype("float64") elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"): # we need to coerce to object dtype to avoid # to allow numpy to take our string as a scalar value dtype = np.dtype("object") if not isna(value): value = ensure_str(value) elif dtype.kind in ["M", "m"]: value = _maybe_box_and_unbox_datetimelike(value, dtype) subarr = np.empty(length, dtype=dtype) if length: # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes subarr.fill(value) return subarr def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. 
Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def sanitize_array( data, index: Index | None, dtype: DtypeObj | None = None, copy: bool = False, *, allow_2d: bool = False, ) -> ArrayLike: """ Sanitize input data to an ndarray or ExtensionArray, copy if specified, coerce to the dtype if specified. Parameters ---------- data : Any index : Index or None, default None dtype : np.dtype, ExtensionDtype, or None, default None copy : bool, default False allow_2d : bool, default False If False, raise if we have a 2D Arraylike. Returns ------- np.ndarray or ExtensionArray """ if isinstance(data, ma.MaskedArray): data = sanitize_masked_array(data) if isinstance(dtype, PandasDtype): # Avoid ending up with a PandasArray dtype = dtype.numpy_dtype # extract ndarray or ExtensionArray, ensure we have no PandasArray data = extract_array(data, extract_numpy=True, extract_range=True) if isinstance(data, np.ndarray) and data.ndim == 0: if dtype is None: dtype = data.dtype data = lib.item_from_zerodim(data) elif isinstance(data, range): # GH#16804 data = range_to_ndarray(data) copy = False if not is_list_like(data): if index is None: raise ValueError("index must be specified when data is not list-like") data = construct_1d_arraylike_from_scalar(data, len(index), dtype) return data elif isinstance(data, ABCExtensionArray): # it is already ensured above this is not a PandasArray # Until GH#49309 is fixed this check needs to come before the # ExtensionDtype check if dtype is not None: subarr = data.astype(dtype, copy=copy) elif copy: subarr = data.copy() else: subarr = data elif isinstance(dtype, ExtensionDtype): # create an extension array from its dtype _sanitize_non_ordered(data) cls = dtype.construct_array_type() subarr = cls._from_sequence(data, dtype=dtype, copy=copy) # GH#846 elif isinstance(data, np.ndarray): if isinstance(data, np.matrix): data = data.A if dtype is None: subarr = data if data.dtype == object: subarr = maybe_infer_to_datetimelike(data) if subarr is data and copy: subarr = subarr.copy() else: # we will try to copy by-definition here subarr = _try_cast(data, dtype, copy) elif hasattr(data, "__array__"): # e.g. dask array GH#38645 data = np.array(data, copy=copy) return sanitize_array( data, index=index, dtype=dtype, copy=False, allow_2d=allow_2d, ) else: _sanitize_non_ordered(data) # materialize e.g. generators, convert e.g. 
tuples, abc.ValueView data = list(data) if len(data) == 0 and dtype is None: # We default to float64, matching numpy subarr = np.array([], dtype=np.float64) elif dtype is not None: subarr = _try_cast(data, dtype, copy) else: subarr = maybe_convert_platform(data) if subarr.dtype == object: subarr = cast(np.ndarray, subarr) subarr = maybe_infer_to_datetimelike(subarr) subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d) if isinstance(subarr, np.ndarray): # at this point we should have dtype be None or subarr.dtype == dtype dtype = cast(np.dtype, dtype) subarr = _sanitize_str_dtypes(subarr, data, dtype, copy) return subarr class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. 
""" _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." 
) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. 
if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). 
This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. """ arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. 
Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. 
""" return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. 
self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. 
test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
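        The string is built by :meth:`Series.to_string` using repr
        parameters derived from the current display options.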
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
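        Examples
        --------
        A minimal illustration of the default behaviour and of
        ``dropna=False`` (output shown under default display options):

        >>> s = pd.Series([2, 4, 2, 2, 4, None])
        >>> s.mode()
        0    2.0
        dtype: float64

        >>> pd.Series([2, 4, None, None]).mode(dropna=False)
        0   NaN
        dtype: float64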
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
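        Delegates to :meth:`Series.dot`, so the alignment and
        shape-mismatch rules documented there apply.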
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
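            The result keeps the original index and name of the calling Series.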
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored.
        level : int or level name, default None
            In case of MultiIndex, only rename labels in the specified
            level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise `KeyError` when a `dict-like mapper` or
            `index` contains labels that are not present in the index
            being transformed.
            If 'ignore', existing keys will be renamed and extra keys
            will be ignored.

        Returns
        -------
        Series or None
            Series with index labels or name altered or None if ``inplace=True``.

        See Also
        --------
        DataFrame.rename : Corresponding DataFrame method.
        Series.rename_axis : Set the name of the axis.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64
        """
        if axis is not None:
            # Make sure we raise if an invalid 'axis' is passed.
            axis = self._get_axis_number(axis)

        if callable(index) or is_dict_like(index):
            # error: Argument 1 to "_rename" of "NDFrame" has incompatible
            # type "Union[Union[Mapping[Any, Hashable], Callable[[Any],
            # Hashable]], Hashable, None]"; expected "Union[Mapping[Any,
            # Hashable], Callable[[Any], Hashable], None]"
            return super()._rename(
                index,  # type: ignore[arg-type]
                copy=copy,
                inplace=inplace,
                level=level,
                errors=errors,
            )
        else:
            return self._set_name(index, inplace=inplace)

    # The shared-doc decorators and method bodies for `set_axis`, `shift`,
    # `isna`/`notna`, `asfreq`/`resample`, the index/accessor/plotting
    # attributes, and the template-based arithmetic/comparison methods were
    # elided from this snippet; only the `set_axis` example survives:
    #
    # >>> s = pd.Series([1, 2, 3])
    # >>> s.set_axis(['a', 'b', 'c'], axis=0)
    # a    1
    # b    2
    # c    3
    # dtype: int64

The provided code snippet includes necessary dependencies for implementing the `dict_to_mgr` function. Write a Python function `def dict_to_mgr( data: dict, index, columns, *, dtype: DtypeObj | None = None, typ: str = "block", copy: bool = True, ) -> Manager` to solve the following problem: Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. Used in DataFrame.__init__ Here is the function:
def dict_to_mgr(
    data: dict,
    index,
    columns,
    *,
    dtype: DtypeObj | None = None,
    typ: str = "block",
    copy: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases. Used in DataFrame.__init__ """ arrays: Sequence[Any] | Series if columns is not None: from pandas.core.series import Series arrays = Series(data, index=columns, dtype=object) missing = arrays.isna() if index is None: # GH10856 # raise ValueError if only scalars in dict index = _extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): nan_dtype: DtypeObj if dtype is not None: # calling sanitize_array ensures we don't mix-and-match # NA dtypes midxs = missing.values.nonzero()[0] for i in midxs: arr = sanitize_array(arrays.iat[i], index, dtype=dtype) arrays.iat[i] = arr else: # GH#1783 nan_dtype = np.dtype("object") val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) nmissing = missing.sum() if copy: rhs = [val] * nmissing else: # GH#45369 rhs = [val.copy() for _ in range(nmissing)] arrays.loc[missing] = rhs arrays = list(arrays) columns = ensure_index(columns) else: keys = list(data.keys()) columns = Index(keys) arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] arrays = [arr if not isinstance(arr, Index) else arr._data for arr in arrays] if copy: if typ == "block": # We only need to copy arrays that will not get consolidated, i.e. # only EA arrays arrays = [x.copy() if isinstance(x, ExtensionArray) else x for x in arrays] else: # dtype check to exclude e.g. range objects, scalars arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays] return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
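A minimal usage sketch of `dict_to_mgr` (an illustrative addition; it assumes the pandas 2.0-era layout where the function lives in `pandas.core.internals.construction`, and the function is private, so the import path is not a stable API):

import pandas as pd
from pandas.core.internals.construction import dict_to_mgr

data = {"a": [1, 2, 3], "b": [1.0, 2.0, 3.0]}

# Direct call: with index=None the row index is inferred from the dict
# values, and typ="block" (the default) produces a BlockManager.
mgr = dict_to_mgr(data, index=None, columns=None)
print(type(mgr).__name__)  # BlockManager
print(mgr.shape)           # (2, 3): internally (n_columns, n_rows)

# The same code path runs implicitly inside DataFrame.__init__:
df = pd.DataFrame(data)
print(df.dtypes)           # a: int64, b: float64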
Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. Used in DataFrame.__init__
173,093
from __future__ import annotations from collections import abc from typing import ( Any, Hashable, Sequence, ) import numpy as np from numpy import ma from pandas._libs import lib from pandas._typing import ( ArrayLike, DtypeObj, Manager, npt, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core import ( algorithms, common as com, ) from pandas.core.arrays import ( BooleanArray, ExtensionArray, FloatingArray, IntegerArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, sanitize_array, ) from pandas.core.indexes.api import ( DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, union_indexes, ) from pandas.core.internals.array_manager import ( ArrayManager, SingleArrayManager, ) from pandas.core.internals.blocks import ( BlockPlacement, ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, create_block_manager_from_blocks, create_block_manager_from_column_arrays, ) def _get_names_from_index(data) -> Index: has_some_name = any(getattr(s, "name", None) is not None for s in data) if not has_some_name: return default_index(len(data)) index: list[Hashable] = list(range(len(data))) count = 0 for i, s in enumerate(data): n = getattr(s, "name", None) if n is not None: index[i] = n else: index[i] = f"Unnamed {count}" count += 1 return Index(index) def to_arrays( data, columns: Index | None, dtype: DtypeObj | None = None ) -> tuple[list[ArrayLike], Index]: """ Return list of arrays, columns. Returns ------- list[ArrayLike] These will become columns in a DataFrame. Index This will become frame.columns. Notes ----- Ensures that len(result_arrays) == len(result_index). """ if isinstance(data, ABCDataFrame): # see test_from_records_with_index_data, test_from_records_bad_index_column if columns is not None: arrays = [ data._ixs(i, axis=1)._values for i, col in enumerate(data.columns) if col in columns ] else: columns = data.columns arrays = [data._ixs(i, axis=1)._values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): if data.dtype.names is not None: # i.e. numpy structured array columns = ensure_index(data.dtype.names) arrays = [data[name] for name in columns] if len(data) == 0: # GH#42456 the indexing above results in list of 2D ndarrays # TODO: is that an issue with numpy? for i, arr in enumerate(arrays): if arr.ndim == 2: arrays[i] = arr[:, 0] return arrays, columns return [], ensure_index([]) elif isinstance(data, np.ndarray) and data.dtype.names is not None: # e.g. 
recarray columns = Index(list(data.dtype.names)) arrays = [data[k] for k in columns] return arrays, columns if isinstance(data[0], (list, tuple)): arr = _list_to_arrays(data) elif isinstance(data[0], abc.Mapping): arr, columns = _list_of_dict_to_arrays(data, columns) elif isinstance(data[0], ABCSeries): arr, columns = _list_of_series_to_arrays(data, columns) else: # last ditch effort data = [tuple(x) for x in data] arr = _list_to_arrays(data) content, columns = _finalize_columns_and_data(arr, columns, dtype) return content, columns class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... ArrayLike = Union["ExtensionArray", np.ndarray] DtypeObj = Union[np.dtype, "ExtensionDtype"] ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) def default_index(n: int) -> RangeIndex: rng = range(0, n) return RangeIndex._simple_new(rng, name=None) The provided code snippet includes necessary dependencies for implementing the `nested_data_to_arrays` function. Write a Python function `def nested_data_to_arrays( data: Sequence, columns: Index | None, index: Index | None, dtype: DtypeObj | None, ) -> tuple[list[ArrayLike], Index, Index]` to solve the following problem: Convert a single sequence of arrays to multiple arrays. Here is the function: def nested_data_to_arrays( data: Sequence, columns: Index | None, index: Index | None, dtype: DtypeObj | None, ) -> tuple[list[ArrayLike], Index, Index]: """ Convert a single sequence of arrays to multiple arrays. """ # By the time we get here we have already checked treat_as_nested(data) if is_named_tuple(data[0]) and columns is None: columns = ensure_index(data[0]._fields) arrays, columns = to_arrays(data, columns, dtype=dtype) columns = ensure_index(columns) if index is None: if isinstance(data[0], ABCSeries): index = _get_names_from_index(data) else: index = default_index(len(data)) return arrays, columns, index
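A short sketch of the nested-rows path (same pandas-internal module layout assumed; `nested_data_to_arrays` is private, so this is illustration only):

from pandas.core.internals.construction import (
    nested_data_to_arrays,
    treat_as_nested,
)

data = [[1, 2], [3, 4], [5, 6]]  # a sequence of 1-D row-like sequences
assert treat_as_nested(data)     # callers are expected to check this first

arrays, columns, index = nested_data_to_arrays(
    data, columns=None, index=None, dtype=None
)
print(columns.tolist())              # [0, 1]: default integer column labels
print(index.tolist())                # [0, 1, 2]: default RangeIndex over rows
print([a.tolist() for a in arrays])  # [[1, 3, 5], [2, 4, 6]]: column arrays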
Convert a single sequence of arrays to multiple arrays.
173,094
from __future__ import annotations from collections import abc from typing import ( Any, Hashable, Sequence, ) import numpy as np from numpy import ma from pandas._libs import lib from pandas._typing import ( ArrayLike, DtypeObj, Manager, npt, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core import ( algorithms, common as com, ) from pandas.core.arrays import ( BooleanArray, ExtensionArray, FloatingArray, IntegerArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, sanitize_array, ) from pandas.core.indexes.api import ( DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, union_indexes, ) from pandas.core.internals.array_manager import ( ArrayManager, SingleArrayManager, ) from pandas.core.internals.blocks import ( BlockPlacement, ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, create_block_manager_from_blocks, create_block_manager_from_column_arrays, ) The provided code snippet includes necessary dependencies for implementing the `treat_as_nested` function. Write a Python function `def treat_as_nested(data) -> bool` to solve the following problem: Check if we should use nested_data_to_arrays. Here is the function: def treat_as_nested(data) -> bool: """ Check if we should use nested_data_to_arrays. """ return ( len(data) > 0 and is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1 and not (isinstance(data, ExtensionArray) and data.ndim == 2) )
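A few spot checks of the predicate (assuming the internals imported above):

import numpy as np

from pandas.core.internals.construction import treat_as_nested

print(treat_as_nested([[1, 2], [3, 4]]))  # True: rows are 1-D list-likes
print(treat_as_nested([1, 2, 3]))         # False: rows are scalars
print(treat_as_nested([]))                # False: empty input
print(treat_as_nested(np.ones((2, 2))))   # True: each row is a 1-D ndarray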
Check if we should use nested_data_to_arrays.
173,095
from __future__ import annotations from collections import abc from typing import ( Any, Hashable, Sequence, ) import numpy as np from numpy import ma from pandas._libs import lib from pandas._typing import ( ArrayLike, DtypeObj, Manager, npt, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core import ( algorithms, common as com, ) from pandas.core.arrays import ( BooleanArray, ExtensionArray, FloatingArray, IntegerArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, sanitize_array, ) from pandas.core.indexes.api import ( DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, union_indexes, ) from pandas.core.internals.array_manager import ( ArrayManager, SingleArrayManager, ) from pandas.core.internals.blocks import ( BlockPlacement, ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, create_block_manager_from_blocks, create_block_manager_from_column_arrays, ) def asdict(obj: Any) -> Dict[str, Any]: ... def asdict(obj: Any, *, dict_factory: Callable[[List[Tuple[str, Any]]], _T]) -> _T: ... The provided code snippet includes necessary dependencies for implementing the `dataclasses_to_dicts` function. Write a Python function `def dataclasses_to_dicts(data)` to solve the following problem: Converts a list of dataclass instances to a list of dictionaries. Parameters ---------- data : List[Type[dataclass]] Returns -------- list_dict : List[dict] Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}] Here is the function: def dataclasses_to_dicts(data): """ Converts a list of dataclass instances to a list of dictionaries. Parameters ---------- data : List[Type[dataclass]] Returns -------- list_dict : List[dict] Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}] """ from dataclasses import asdict return list(map(asdict, data))
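The typical consumer is DataFrame construction, which turns a list of dataclass instances into dict records before the usual record path runs; a small sketch (the direct import of the private helper is for illustration only):

from dataclasses import dataclass

import pandas as pd
from pandas.core.internals.construction import dataclasses_to_dicts

@dataclass
class Point:
    x: int
    y: int

records = [Point(1, 2), Point(3, 4)]
print(dataclasses_to_dicts(records))  # [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]

# Equivalent end-user spelling: the constructor applies the same conversion.
df = pd.DataFrame(records)
print(df.columns.tolist())  # ['x', 'y']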
Converts a list of dataclass instances to a list of dictionaries. Parameters ---------- data : List[Type[dataclass]] Returns -------- list_dict : List[dict] Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
173,096
from __future__ import annotations

import re
import typing

import numpy as np

from pandas._typing import DtypeObj

import pandas as pd
from pandas.api.types import is_datetime64_dtype

class ArrowCTypes:
    """
    Enum for Apache Arrow C type format strings.

    The Arrow C data interface:
    https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings
    """

    NULL = "n"
    BOOL = "b"
    INT8 = "c"
    UINT8 = "C"
    INT16 = "s"
    UINT16 = "S"
    INT32 = "i"
    UINT32 = "I"
    INT64 = "l"
    UINT64 = "L"
    FLOAT16 = "e"
    FLOAT32 = "f"
    FLOAT64 = "g"
    STRING = "u"  # utf-8
    DATE32 = "tdD"
    DATE64 = "tdm"
    # Resolution:
    #   - seconds -> 's'
    #   - milliseconds -> 'm'
    #   - microseconds -> 'u'
    #   - nanoseconds -> 'n'
    TIMESTAMP = "ts{resolution}:{tz}"
    TIME = "tt{resolution}"

DtypeObj = Union[np.dtype, "ExtensionDtype"]

The provided code snippet includes necessary dependencies for implementing the `dtype_to_arrow_c_fmt` function. Write a Python function `def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str` to solve the following problem: Represent pandas `dtype` as a format string in Apache Arrow C notation. Parameters ---------- dtype : np.dtype Datatype of pandas DataFrame to represent. Returns ------- str Format string in Apache Arrow C notation of the given `dtype`. Here is the function:
def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
    """
    Represent pandas `dtype` as a format string in Apache Arrow C notation.

    Parameters
    ----------
    dtype : np.dtype
        Datatype of pandas DataFrame to represent.

    Returns
    -------
    str
        Format string in Apache Arrow C notation of the given `dtype`.
    """
    if isinstance(dtype, pd.CategoricalDtype):
        return ArrowCTypes.INT64
    elif dtype == np.dtype("O"):
        return ArrowCTypes.STRING

    format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
    if format_str is not None:
        return format_str

    if is_datetime64_dtype(dtype):
        # Selecting the first char of resolution string:
        # dtype.str -> '<M8[ns]'
        resolution = re.findall(r"\[(.*)\]", typing.cast(np.dtype, dtype).str)[0][:1]
        return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")

    raise NotImplementedError(
        f"Conversion of {dtype} to Arrow C format string is not implemented."
    )
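A few spot checks of the mapping (assuming the pandas 2.0-era module path `pandas.core.interchange.utils`):

import numpy as np
import pandas as pd

from pandas.core.interchange.utils import dtype_to_arrow_c_fmt

print(dtype_to_arrow_c_fmt(np.dtype("int64")))           # 'l'
print(dtype_to_arrow_c_fmt(np.dtype("float32")))         # 'f'
print(dtype_to_arrow_c_fmt(np.dtype("O")))               # 'u' (utf-8 string)
print(dtype_to_arrow_c_fmt(np.dtype("datetime64[ns]")))  # 'tsn:' (ns, no tz)
print(dtype_to_arrow_c_fmt(pd.CategoricalDtype()))       # 'l' (int64 codes)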
Represent pandas `dtype` as a format string in Apache Arrow C notation. Parameters ---------- dtype : np.dtype Datatype of pandas DataFrame to represent. Returns ------- str Format string in Apache Arrow C notation of the given `dtype`.
173,097
from __future__ import annotations import ctypes import re from typing import Any import numpy as np import pandas as pd from pandas.core.interchange.column import PandasColumn from pandas.core.interchange.dataframe_protocol import ( Buffer, Column, ColumnNullType, DataFrame as DataFrameXchg, DtypeKind, ) from pandas.core.interchange.utils import ( ArrowCTypes, Endianness, ) def _from_dataframe(df: DataFrameXchg, allow_copy: bool = True): """ Build a ``pd.DataFrame`` from the DataFrame interchange object. Parameters ---------- df : DataFrameXchg Object supporting the interchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame """ pandas_dfs = [] for chunk in df.get_chunks(): pandas_df = protocol_df_chunk_to_pandas(chunk) pandas_dfs.append(pandas_df) if not allow_copy and len(pandas_dfs) > 1: raise RuntimeError( "To join chunks a copy is required which is forbidden by allow_copy=False" ) if len(pandas_dfs) == 1: pandas_df = pandas_dfs[0] else: pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False) index_obj = df.metadata.get("pandas.index", None) if index_obj is not None: pandas_df.index = index_obj return pandas_df class DataFrame(ABC): """ A data frame class, with only the methods required by the interchange protocol defined. A "data frame" represents an ordered collection of named columns. A column's "name" must be a unique string. Columns may be accessed by name or by position. This could be a public data frame class, or an object with the methods and attributes defined on this DataFrame class could be returned from the ``__dataframe__`` method of a public data frame class in a library adhering to the dataframe interchange protocol specification. """ version = 0 # version of the protocol def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True): """Construct a new interchange object, potentially changing the parameters.""" def metadata(self) -> dict[str, Any]: """ The metadata for the data frame, as a dictionary with string keys. The contents of `metadata` may be anything, they are meant for a library to store information that it needs to, e.g., roundtrip losslessly or for two implementations to share data that is not (yet) part of the interchange protocol specification. For avoiding collisions with other entries, please add name the keys with the name of the library followed by a period and the desired name, e.g, ``pandas.indexcol``. """ def num_columns(self) -> int: """ Return the number of columns in the DataFrame. """ def num_rows(self) -> int | None: # TODO: not happy with Optional, but need to flag it may be expensive # why include it if it may be None - what do we expect consumers # to do here? """ Return the number of rows in the DataFrame, if available. """ def num_chunks(self) -> int: """ Return the number of chunks the DataFrame consists of. """ def column_names(self) -> Iterable[str]: """ Return an iterator yielding the column names. """ def get_column(self, i: int) -> Column: """ Return the column at the indicated position. """ def get_column_by_name(self, name: str) -> Column: """ Return the column whose name is the indicated name. """ def get_columns(self) -> Iterable[Column]: """ Return an iterator yielding the columns. """ def select_columns(self, indices: Sequence[int]) -> DataFrame: """ Create a new DataFrame by selecting a subset of columns by index. 
""" def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: """ Create a new DataFrame by selecting a subset of columns by name. """ def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]: """ Return an iterator yielding the chunks. By default (None), yields the chunks that the data is stored as by the producer. If given, ``n_chunks`` must be a multiple of ``self.num_chunks()``, meaning the producer must subdivide each chunk before yielding it. """ The provided code snippet includes necessary dependencies for implementing the `from_dataframe` function. Write a Python function `def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame` to solve the following problem: Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol. Parameters ---------- df : DataFrameXchg Object supporting the interchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame Here is the function: def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame: """ Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol. Parameters ---------- df : DataFrameXchg Object supporting the interchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame """ if isinstance(df, pd.DataFrame): return df if not hasattr(df, "__dataframe__"): raise ValueError("`df` does not support __dataframe__") return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))
Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol. Parameters ---------- df : DataFrameXchg Object supporting the interchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame
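A usage sketch for `from_dataframe`: with a pandas producer the isinstance fast path returns the frame unchanged, while any other producer must expose `__dataframe__` (e.g. pyarrow or polars objects) or a ValueError is raised:

import pandas as pd
from pandas.api.interchange import from_dataframe

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})

same = from_dataframe(df)  # pandas input: returned as-is
assert same is df

try:
    from_dataframe([1, 2, 3])  # plain list: no __dataframe__ method
except ValueError as err:
    print(err)  # `df` does not support __dataframe__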
173,098
from __future__ import annotations from typing import Literal import warnings from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import maybe_box_native from pandas.core.dtypes.common import ( is_extension_array_dtype, is_object_dtype, ) from pandas import DataFrame from pandas.core import common as com Literal: _SpecialForm = ... def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType: """ If passed a scalar cast the scalar to a python native type. Parameters ---------- value : scalar or Series Returns ------- scalar or Series """ if is_float(value): # error: Argument 1 to "float" has incompatible type # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]"; # expected "Union[SupportsFloat, _SupportsIndex, str]" value = float(value) # type: ignore[arg-type] elif is_integer(value): # error: Argument 1 to "int" has incompatible type # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]"; # expected "Union[str, SupportsInt, _SupportsIndex, _SupportsTrunc]" value = int(value) # type: ignore[arg-type] elif is_bool(value): value = bool(value) elif isinstance(value, (np.datetime64, np.timedelta64)): value = maybe_box_datetimelike(value) elif value is NA: value = None return value def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_extension_array_dtype(arr_or_dtype) -> bool: """ Check if an object is a pandas extension array type. See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object For array-like input, the ``.dtype`` attribute will be extracted. Returns ------- bool Whether the `arr_or_dtype` is an extension array type. Notes ----- This checks whether an object implements the pandas extension array interface. In pandas, this includes: * Categorical * Sparse * Interval * Period * DatetimeArray * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. 
Examples -------- >>> from pandas.api.types import is_extension_array_dtype >>> arr = pd.Categorical(['a', 'b']) >>> is_extension_array_dtype(arr) True >>> is_extension_array_dtype(arr.dtype) True >>> arr = np.array(['a', 'b']) >>> is_extension_array_dtype(arr.dtype) False """ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) if isinstance(dtype, ExtensionDtype): return True elif isinstance(dtype, np.dtype): return False else: return registry.find(dtype) is not None The provided code snippet includes necessary dependencies for implementing the `to_dict` function. Write a Python function `def to_dict( df: DataFrame, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]` to solve the following problem: Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Here is the function: def to_dict( df: DataFrame, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. 
Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. """ if not df.columns.is_unique: warnings.warn( "DataFrame columns are not unique, some columns will be omitted.", UserWarning, stacklevel=find_stack_level(), ) # GH16122 into_c = com.standardize_mapping(into) # error: Incompatible types in assignment (expression has type "str", # variable has type "Literal['dict', 'list', 'series', 'split', 'tight', # 'records', 'index']") orient = orient.lower() # type: ignore[assignment] if not index and orient not in ["split", "tight"]: raise ValueError( "'index=False' is only valid when 'orient' is 'split' or 'tight'" ) if orient == "series": # GH46470 Return quickly if orient series to avoid creating dtype objects return into_c((k, v) for k, v in df.items()) box_native_indices = [ i for i, col_dtype in enumerate(df.dtypes.values) if is_object_dtype(col_dtype) or is_extension_array_dtype(col_dtype) ] are_all_object_dtype_cols = len(box_native_indices) == len(df.dtypes) if orient == "dict": return into_c((k, v.to_dict(into)) for k, v in df.items()) elif orient == "list": object_dtype_indices_as_set = set(box_native_indices) return into_c( ( k, list(map(maybe_box_native, v.tolist())) if i in object_dtype_indices_as_set else v.tolist(), ) for i, (k, v) in enumerate(df.items()) ) elif orient == "split": data = df._create_data_for_split_and_tight_to_dict( are_all_object_dtype_cols, box_native_indices ) return into_c( ((("index", df.index.tolist()),) if index else ()) + ( ("columns", df.columns.tolist()), ("data", data), ) ) elif orient == "tight": data = df._create_data_for_split_and_tight_to_dict( are_all_object_dtype_cols, box_native_indices ) return into_c( ((("index", df.index.tolist()),) if index else ()) + ( ("columns", df.columns.tolist()), ( "data", [ list(map(maybe_box_native, t)) for t in df.itertuples(index=False, name=None) ], ), ) + ((("index_names", list(df.index.names)),) if index else ()) + (("column_names", list(df.columns.names)),) ) elif orient == "records": columns = df.columns.tolist() if are_all_object_dtype_cols: rows = ( dict(zip(columns, row)) for row in df.itertuples(index=False, name=None) ) return [ into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows ] else: data = [ into_c(zip(columns, t)) for t in df.itertuples(index=False, name=None) ] if box_native_indices: object_dtype_indices_as_set = set(box_native_indices) object_dtype_cols = { col for i, col in enumerate(df.columns) if i in object_dtype_indices_as_set } for row in data: for col in object_dtype_cols: row[col] = maybe_box_native(row[col]) return data elif orient == "index": if not df.index.is_unique: raise ValueError("DataFrame index must be unique for orient='index'.") columns = df.columns.tolist() if are_all_object_dtype_cols: return into_c( (t[0], dict(zip(df.columns, map(maybe_box_native, t[1:])))) for t in df.itertuples(name=None) ) elif box_native_indices: object_dtype_indices_as_set = set(box_native_indices) is_object_dtype_by_index = [ i in object_dtype_indices_as_set for i in range(len(df.columns)) ] 
return into_c( ( t[0], { columns[i]: maybe_box_native(v) if is_object_dtype_by_index[i] else v for i, v in enumerate(t[1:]) }, ) for t in df.itertuples(name=None) ) else: return into_c( (t[0], dict(zip(df.columns, t[1:]))) for t in df.itertuples(name=None) ) else: raise ValueError(f"orient '{orient}' not understood")
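The orients are easiest to compare through the public `DataFrame.to_dict` wrapper, which delegates to this function; a quick tour:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [0.5, 0.75]}, index=["r1", "r2"])

print(df.to_dict())           # {'a': {'r1': 1, 'r2': 2}, 'b': {'r1': 0.5, 'r2': 0.75}}
print(df.to_dict("list"))     # {'a': [1, 2], 'b': [0.5, 0.75]}
print(df.to_dict("records"))  # [{'a': 1, 'b': 0.5}, {'a': 2, 'b': 0.75}]
print(df.to_dict("split"))    # {'index': ['r1', 'r2'], 'columns': ['a', 'b'],
                              #  'data': [[1, 0.5], [2, 0.75]]}
print(df.to_dict("tight", index=False))  # omits the 'index'/'index_names' items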
Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter.
173,099
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Sequence, cast, ) import numpy as np from pandas._libs.tslibs import Timestamp from pandas._typing import ( DtypeObj, NDFrameT, npt, ) from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import ( is_bool_dtype, is_complex_dtype, is_datetime64_any_dtype, is_extension_array_dtype, is_numeric_dtype, is_timedelta64_dtype, ) import pandas as pd from pandas.core.reshape.concat import concat from pandas.io.formats.format import format_percentiles class NDFrameDescriberAbstract(ABC): """Abstract class for describing dataframe or series. Parameters ---------- obj : Series or DataFrame Object to be described. """ def __init__(self, obj: DataFrame | Series) -> None: self.obj = obj def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series: """Do describe either series or dataframe. Parameters ---------- percentiles : list-like of numbers The percentiles to include in the output. """ class SeriesDescriber(NDFrameDescriberAbstract): """Class responsible for creating series description.""" obj: Series def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series: describe_func = select_describe_func( self.obj, ) return describe_func(self.obj, percentiles) class DataFrameDescriber(NDFrameDescriberAbstract): """Class responsible for creating dataobj description. Parameters ---------- obj : DataFrame DataFrame to be described. include : 'all', list-like of dtypes or None A white list of data types to include in the result. exclude : list-like of dtypes or None A black list of data types to omit from the result. """ def __init__( self, obj: DataFrame, *, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None, ) -> None: self.include = include self.exclude = exclude if obj.ndim == 2 and obj.columns.size == 0: raise ValueError("Cannot describe a DataFrame without columns") super().__init__(obj) def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame: data = self._select_data() ldesc: list[Series] = [] for _, series in data.items(): describe_func = select_describe_func(series) ldesc.append(describe_func(series, percentiles)) col_names = reorder_columns(ldesc) d = concat( [x.reindex(col_names, copy=False) for x in ldesc], axis=1, sort=False, ) d.columns = data.columns.copy() return d def _select_data(self): """Select columns to be described.""" if (self.include is None) and (self.exclude is None): # when some numerics are found, keep only numerics default_include: list[npt.DTypeLike] = [np.number, "datetime"] data = self.obj.select_dtypes(include=default_include) if len(data.columns) == 0: data = self.obj elif self.include == "all": if self.exclude is not None: msg = "exclude must be None when include is 'all'" raise ValueError(msg) data = self.obj else: data = self.obj.select_dtypes( include=self.include, exclude=self.exclude, ) return data def refine_percentiles( percentiles: Sequence[float] | np.ndarray | None, ) -> np.ndarray[Any, np.dtype[np.float64]]: """ Ensure that percentiles are unique and sorted. Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. 
""" if percentiles is None: return np.array([0.25, 0.5, 0.75]) # explicit conversion of `percentiles` to list percentiles = list(percentiles) # get them all to be in [0, 1] validate_percentile(percentiles) # median should always be included if 0.5 not in percentiles: percentiles.append(0.5) percentiles = np.asarray(percentiles) # sort and check for duplicates unique_pcts = np.unique(percentiles) assert percentiles is not None if len(unique_pcts) < len(percentiles): raise ValueError("percentiles cannot contain duplicates") return unique_pcts class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... NDFrameT = TypeVar("NDFrameT", bound="NDFrame") The provided code snippet includes necessary dependencies for implementing the `describe_ndframe` function. Write a Python function `def describe_ndframe( *, obj: NDFrameT, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None, percentiles: Sequence[float] | np.ndarray | None, ) -> NDFrameT` to solve the following problem: Describe series or dataframe. Called from pandas.core.generic.NDFrame.describe() Parameters ---------- obj: DataFrame or Series Either dataframe or series to be described. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. Returns ------- Dataframe or series description. Here is the function: def describe_ndframe( *, obj: NDFrameT, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None, percentiles: Sequence[float] | np.ndarray | None, ) -> NDFrameT: """Describe series or dataframe. Called from pandas.core.generic.NDFrame.describe() Parameters ---------- obj: DataFrame or Series Either dataframe or series to be described. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. Returns ------- Dataframe or series description. """ percentiles = refine_percentiles(percentiles) describer: NDFrameDescriberAbstract if obj.ndim == 1: describer = SeriesDescriber( obj=cast("Series", obj), ) else: describer = DataFrameDescriber( obj=cast("DataFrame", obj), include=include, exclude=exclude, ) result = describer.describe(percentiles=percentiles) return cast(NDFrameT, result)
Describe series or dataframe.

Called from pandas.core.generic.NDFrame.describe()

Parameters
----------
obj : DataFrame or Series
    Either dataframe or series to be described.
include : 'all', list-like of dtypes or None (default), optional
    A white list of data types to include in the result. Ignored for ``Series``.
exclude : list-like of dtypes or None (default), optional
    A black list of data types to omit from the result. Ignored for ``Series``.
percentiles : list-like of numbers, optional
    The percentiles to include in the output. All should fall between 0 and 1.
    The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th
    percentiles.

Returns
-------
Dataframe or series description.
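For orientation, a minimal usage sketch (not part of the dataset row above): in user code these internals are reached through the public DataFrame.describe() / Series.describe() entry points, which forward include, exclude and percentiles to describe_ndframe. The percentile handling shown follows refine_percentiles: the list is validated, 0.5 is re-added if missing, then sorted and de-duplicated.

import pandas as pd

df = pd.DataFrame(
    {
        "num": [1.0, 2.0, 3.0, 4.0],
        "cat": ["a", "a", "b", "b"],
        "when": pd.to_datetime(
            ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04"]
        ),
    }
)

# With include/exclude both None, only the numeric/datetime default selection
# is described; the median row appears even though 0.5 is not requested,
# because refine_percentiles re-adds it.
print(df.describe(percentiles=[0.1, 0.9]))

# include="all" describes every column; reorder_columns then merges the
# per-dtype row labels (count/unique/top/freq vs. count/mean/std/...).
print(df.describe(include="all"))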
173,100
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Sequence, cast, ) import numpy as np from pandas._libs.tslibs import Timestamp from pandas._typing import ( DtypeObj, NDFrameT, npt, ) from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import ( is_bool_dtype, is_complex_dtype, is_datetime64_any_dtype, is_extension_array_dtype, is_numeric_dtype, is_timedelta64_dtype, ) import pandas as pd from pandas.core.reshape.concat import concat from pandas.io.formats.format import format_percentiles class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... The provided code snippet includes necessary dependencies for implementing the `reorder_columns` function. Write a Python function `def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]` to solve the following problem: Set a convenient order for rows for display. Here is the function: def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]: """Set a convenient order for rows for display.""" names: list[Hashable] = [] ldesc_indexes = sorted((x.index for x in ldesc), key=len) for idxnames in ldesc_indexes: for name in idxnames: if name not in names: names.append(name) return names
Set a convenient order for rows for display.
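A small illustrative sketch of the merge behaviour (the input Series are made up, and reorder_columns from the snippet above is assumed to be in scope): the per-column description indexes are visited shortest first, so labels shared by all columns such as "count" end up at the top and dtype-specific labels follow.

import pandas as pd

object_desc = pd.Series([3, 2, "a", 2], index=["count", "unique", "top", "freq"])
numeric_desc = pd.Series(
    [3.0, 2.0, 1.0, 1.0, 2.0, 2.5, 3.0],
    index=["count", "mean", "std", "min", "50%", "75%", "max"],
)

# The shorter index is consumed first and duplicates are skipped:
# ['count', 'unique', 'top', 'freq', 'mean', 'std', 'min', '50%', '75%', 'max']
print(reorder_columns([numeric_desc, object_desc]))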
173,101
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Sequence, cast, ) import numpy as np from pandas._libs.tslibs import Timestamp from pandas._typing import ( DtypeObj, NDFrameT, npt, ) from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import ( is_bool_dtype, is_complex_dtype, is_datetime64_any_dtype, is_extension_array_dtype, is_numeric_dtype, is_timedelta64_dtype, ) import pandas as pd from pandas.core.reshape.concat import concat from pandas.io.formats.format import format_percentiles class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... The provided code snippet includes necessary dependencies for implementing the `describe_timestamp_as_categorical_1d` function. Write a Python function `def describe_timestamp_as_categorical_1d( data: Series, percentiles_ignored: Sequence[float], ) -> Series` to solve the following problem: Describe series containing timestamp data treated as categorical. Parameters ---------- data : Series Series to be described. percentiles_ignored : list-like of numbers Ignored, but in place to unify interface. Here is the function: def describe_timestamp_as_categorical_1d( data: Series, percentiles_ignored: Sequence[float], ) -> Series: """Describe series containing timestamp data treated as categorical. Parameters ---------- data : Series Series to be described. percentiles_ignored : list-like of numbers Ignored, but in place to unify interface. """ names = ["count", "unique"] objcounts = data.value_counts() count_unique = len(objcounts[objcounts != 0]) result = [data.count(), count_unique] dtype = None if count_unique > 0: top, freq = objcounts.index[0], objcounts.iloc[0] tz = data.dt.tz asint = data.dropna().values.view("i8") top = Timestamp(top) if top.tzinfo is not None and tz is not None: # Don't tz_localize(None) if key is already tz-aware top = top.tz_convert(tz) else: top = top.tz_localize(tz) names += ["top", "freq", "first", "last"] result += [ top, freq, Timestamp(asint.min(), tz=tz), Timestamp(asint.max(), tz=tz), ] # If the DataFrame is empty, set 'top' and 'freq' to None # to maintain output shape consistency else: names += ["top", "freq"] result += [np.nan, np.nan] dtype = "object" from pandas import Series return Series(result, index=names, name=data.name, dtype=dtype)
Describe series containing timestamp data treated as categorical.

Parameters
----------
data : Series
    Series to be described.
percentiles_ignored : list-like of numbers
    Ignored, but in place to unify interface.
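An illustrative call, assuming describe_timestamp_as_categorical_1d from the snippet above is in scope: it reports the categorical-style statistics plus the first and last observed timestamps.

import pandas as pd

when = pd.Series(pd.to_datetime(["2021-01-01", "2021-01-01", "2021-03-15"]))

# Resulting index: count, unique, top, freq, first, last
# here: count=3, unique=2, top=2021-01-01, freq=2,
#       first=2021-01-01, last=2021-03-15
print(describe_timestamp_as_categorical_1d(when, percentiles_ignored=[]))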
173,102
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Sequence, cast, ) import numpy as np from pandas._libs.tslibs import Timestamp from pandas._typing import ( DtypeObj, NDFrameT, npt, ) from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import ( is_bool_dtype, is_complex_dtype, is_datetime64_any_dtype, is_extension_array_dtype, is_numeric_dtype, is_timedelta64_dtype, ) import pandas as pd from pandas.core.reshape.concat import concat from pandas.io.formats.format import format_percentiles def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series: """Describe series containing numerical data. Parameters ---------- series : Series Series to be described. percentiles : list-like of numbers The percentiles to include in the output. """ from pandas import Series formatted_percentiles = format_percentiles(percentiles) stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"] d = ( [series.count(), series.mean(), series.std(), series.min()] + series.quantile(percentiles).tolist() + [series.max()] ) # GH#48340 - always return float on non-complex numeric data dtype: DtypeObj | None if is_extension_array_dtype(series): dtype = pd.Float64Dtype() elif is_numeric_dtype(series) and not is_complex_dtype(series): dtype = np.dtype("float") else: dtype = None return Series(d, index=stat_index, name=series.name, dtype=dtype) def describe_categorical_1d( data: Series, percentiles_ignored: Sequence[float], ) -> Series: """Describe series containing categorical data. Parameters ---------- data : Series Series to be described. percentiles_ignored : list-like of numbers Ignored, but in place to unify interface. """ names = ["count", "unique", "top", "freq"] objcounts = data.value_counts() count_unique = len(objcounts[objcounts != 0]) if count_unique > 0: top, freq = objcounts.index[0], objcounts.iloc[0] dtype = None else: # If the DataFrame is empty, set 'top' and 'freq' to None # to maintain output shape consistency top, freq = np.nan, np.nan dtype = "object" result = [data.count(), count_unique, top, freq] from pandas import Series return Series(result, index=names, name=data.name, dtype=dtype) def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series: """Describe series containing datetime64 dtype. Parameters ---------- data : Series Series to be described. percentiles : list-like of numbers The percentiles to include in the output. """ # GH-30164 from pandas import Series formatted_percentiles = format_percentiles(percentiles) stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"] d = ( [data.count(), data.mean(), data.min()] + data.quantile(percentiles).tolist() + [data.max()] ) return Series(d, index=stat_index, name=data.name) class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) def is_timedelta64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the timedelta64 dtype. 
Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the timedelta64 dtype. Examples -------- >>> from pandas.core.dtypes.common import is_timedelta64_dtype >>> is_timedelta64_dtype(object) False >>> is_timedelta64_dtype(np.timedelta64) True >>> is_timedelta64_dtype([1, 2, 3]) False >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]")) True >>> is_timedelta64_dtype('0 days') False """ if isinstance(arr_or_dtype, np.dtype): # GH#33400 fastpath for dtype object return arr_or_dtype.kind == "m" return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) def is_datetime64_any_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of the datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- bool Whether or not the array or dtype is of the datetime64 dtype. Examples -------- >>> is_datetime64_any_dtype(str) False >>> is_datetime64_any_dtype(int) False >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive True >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern")) True >>> is_datetime64_any_dtype(np.array(['a', 'b'])) False >>> is_datetime64_any_dtype(np.array([1, 2])) False >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]")) True >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")) True """ if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)): # GH#33400 fastpath for dtype object return arr_or_dtype.kind == "M" if arr_or_dtype is None: return False return is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype) def is_numeric_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a numeric dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a numeric dtype. Examples -------- >>> from pandas.api.types import is_numeric_dtype >>> is_numeric_dtype(str) False >>> is_numeric_dtype(int) True >>> is_numeric_dtype(float) True >>> is_numeric_dtype(np.uint64) True >>> is_numeric_dtype(np.datetime64) False >>> is_numeric_dtype(np.timedelta64) False >>> is_numeric_dtype(np.array(['a', 'b'])) False >>> is_numeric_dtype(pd.Series([1, 2])) True >>> is_numeric_dtype(pd.Index([1, 2.])) True >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric ) def is_bool_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. 
Examples -------- >>> from pandas.api.types import is_bool_dtype >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndex): # Allow Index[object] that is all-bools or Index["boolean"] return arr_or_dtype.inferred_type == "boolean" elif isinstance(dtype, ExtensionDtype): return getattr(dtype, "_is_boolean", False) return issubclass(dtype.type, np.bool_) The provided code snippet includes necessary dependencies for implementing the `select_describe_func` function. Write a Python function `def select_describe_func( data: Series, ) -> Callable` to solve the following problem: Select proper function for describing series based on data type. Parameters ---------- data : Series Series to be described. Here is the function: def select_describe_func( data: Series, ) -> Callable: """Select proper function for describing series based on data type. Parameters ---------- data : Series Series to be described. """ if is_bool_dtype(data.dtype): return describe_categorical_1d elif is_numeric_dtype(data): return describe_numeric_1d elif is_datetime64_any_dtype(data.dtype): return describe_timestamp_1d elif is_timedelta64_dtype(data.dtype): return describe_numeric_1d else: return describe_categorical_1d
Select proper function for describing series based on data type.

Parameters
----------
data : Series
    Series to be described.
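A quick dispatch check (illustrative; select_describe_func and the describe_*_1d helpers from the snippets above are assumed to be in scope): each dtype family maps onto the helper named in the function body, with bool checked before the numeric branch and timedelta routed to the numeric summary.

import pandas as pd

assert select_describe_func(pd.Series([1.5, 2.5])) is describe_numeric_1d
assert select_describe_func(pd.Series([True, False])) is describe_categorical_1d
assert select_describe_func(pd.Series(["a", "b"])) is describe_categorical_1d
assert (
    select_describe_func(pd.Series(pd.to_datetime(["2021-01-01"])))
    is describe_timestamp_1d
)
assert (
    select_describe_func(pd.Series(pd.to_timedelta(["1 day"])))
    is describe_numeric_1d
)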
173,103
from __future__ import annotations import numpy as np from pandas.core.algorithms import unique1d from pandas.core.arrays.categorical import ( Categorical, CategoricalDtype, recode_for_categories, ) unique1d = unique ) class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMixin): """ Represent a categorical variable in classic R / S-plus fashion. `Categoricals` can only take on a limited, and usually fixed, number of possible values (`categories`). In contrast to statistical categorical variables, a `Categorical` might have an order, but numerical operations (additions, divisions, ...) are not possible. All values of the `Categorical` are either in `categories` or `np.nan`. Assigning values outside of `categories` will raise a `ValueError`. Order is defined by the order of the `categories`, not lexical order of the values. Parameters ---------- values : list-like The values of the categorical. If categories are given, values not in categories will be replaced with NaN. categories : Index-like (unique), optional The unique categories for this categorical. If not given, the categories are assumed to be the unique values of `values` (sorted, if possible, otherwise in the order in which they appear). ordered : bool, default False Whether or not this categorical is treated as a ordered categorical. If True, the resulting categorical will be ordered. An ordered categorical respects, when sorted, the order of its `categories` attribute (which in turn is the `categories` argument, if provided). dtype : CategoricalDtype An instance of ``CategoricalDtype`` to use for this categorical. Attributes ---------- categories : Index The categories of this categorical codes : ndarray The codes (integer positions, which point to the categories) of this categorical, read only. ordered : bool Whether or not this Categorical is ordered. dtype : CategoricalDtype The instance of ``CategoricalDtype`` storing the ``categories`` and ``ordered``. Methods ------- from_codes __array__ Raises ------ ValueError If the categories do not validate. TypeError If an explicit ``ordered=True`` is given but no `categories` and the `values` are not sortable. See Also -------- CategoricalDtype : Type for categorical data. CategoricalIndex : An Index with an underlying ``Categorical``. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`__ for more. Examples -------- >>> pd.Categorical([1, 2, 3, 1, 2, 3]) [1, 2, 3, 1, 2, 3] Categories (3, int64): [1, 2, 3] >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']) ['a', 'b', 'c', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Missing values are not included as a category. >>> c = pd.Categorical([1, 2, 3, 1, 2, 3, np.nan]) >>> c [1, 2, 3, 1, 2, 3, NaN] Categories (3, int64): [1, 2, 3] However, their presence is indicated in the `codes` attribute by code `-1`. >>> c.codes array([ 0, 1, 2, 0, 1, 2, -1], dtype=int8) Ordered `Categoricals` can be sorted according to the custom order of the categories and can have a min and max value. >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True, ... 
categories=['c', 'b', 'a']) >>> c ['a', 'b', 'c', 'a', 'b', 'c'] Categories (3, object): ['c' < 'b' < 'a'] >>> c.min() 'c' """ # For comparisons, so that numpy uses our implementation if the compare # ops, which raise __array_priority__ = 1000 # tolist is not actually deprecated, just suppressed in the __dir__ _hidden_attrs = PandasObject._hidden_attrs | frozenset(["tolist"]) _typ = "categorical" _dtype: CategoricalDtype def __init__( self, values, categories=None, ordered=None, dtype: Dtype | None = None, fastpath: bool = False, copy: bool = True, ) -> None: dtype = CategoricalDtype._from_values_or_dtype( values, categories, ordered, dtype ) # At this point, dtype is always a CategoricalDtype, but # we may have dtype.categories be None, and we need to # infer categories in a factorization step further below if fastpath: codes = coerce_indexer_dtype(values, dtype.categories) dtype = CategoricalDtype(ordered=False).update_dtype(dtype) super().__init__(codes, dtype) return if not is_list_like(values): # GH#38433 raise TypeError("Categorical input must be list-like") # null_mask indicates missing values we want to exclude from inference. # This means: only missing values in list-likes (not arrays/ndframes). null_mask = np.array(False) # sanitize input if is_categorical_dtype(values): if dtype.categories is None: dtype = CategoricalDtype(values.categories, dtype.ordered) elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)): values = com.convert_to_list_like(values) if isinstance(values, list) and len(values) == 0: # By convention, empty lists result in object dtype: values = np.array([], dtype=object) elif isinstance(values, np.ndarray): if values.ndim > 1: # preempt sanitize_array from raising ValueError raise NotImplementedError( "> 1 ndim Categorical are not supported at this time" ) values = sanitize_array(values, None) else: # i.e. must be a list arr = sanitize_array(values, None) null_mask = isna(arr) if null_mask.any(): # We remove null values here, then below will re-insert # them, grep "full_codes" arr_list = [values[idx] for idx in np.where(~null_mask)[0]] # GH#44900 Do not cast to float if we have only missing values if arr_list or arr.dtype == "object": sanitize_dtype = None else: sanitize_dtype = arr.dtype arr = sanitize_array(arr_list, None, dtype=sanitize_dtype) values = arr if dtype.categories is None: try: codes, categories = factorize(values, sort=True) except TypeError as err: codes, categories = factorize(values, sort=False) if dtype.ordered: # raise, as we don't have a sortable data structure and so # the user should give us one by specifying categories raise TypeError( "'values' is not ordered, please " "explicitly specify the categories order " "by passing in a categories argument." ) from err # we're inferring from values dtype = CategoricalDtype(categories, dtype.ordered) elif is_categorical_dtype(values.dtype): old_codes = extract_array(values)._codes codes = recode_for_categories( old_codes, values.dtype.categories, dtype.categories, copy=copy ) else: codes = _get_codes_for_values(values, dtype.categories) if null_mask.any(): # Reinsert -1 placeholders for previously removed missing values full_codes = -np.ones(null_mask.shape, dtype=codes.dtype) full_codes[~null_mask] = codes codes = full_codes dtype = CategoricalDtype(ordered=False).update_dtype(dtype) arr = coerce_indexer_dtype(codes, dtype.categories) super().__init__(arr, dtype) def dtype(self) -> CategoricalDtype: """ The :class:`~pandas.api.types.CategoricalDtype` for this instance. 
""" return self._dtype def _internal_fill_value(self) -> int: # using the specific numpy integer instead of python int to get # the correct dtype back from _quantile in the all-NA case dtype = self._ndarray.dtype return dtype.type(-1) def _from_sequence( cls, scalars, *, dtype: Dtype | None = None, copy: bool = False ) -> Categorical: return Categorical(scalars, dtype=dtype, copy=copy) def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: ... def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: ... def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: ... def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: """ Coerce this type to another dtype Parameters ---------- dtype : numpy dtype or pandas type copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and dtype is categorical, the original object is returned. """ dtype = pandas_dtype(dtype) if self.dtype is dtype: result = self.copy() if copy else self elif is_categorical_dtype(dtype): dtype = cast(CategoricalDtype, dtype) # GH 10696/18593/18630 dtype = self.dtype.update_dtype(dtype) self = self.copy() if copy else self result = self._set_dtype(dtype) elif isinstance(dtype, ExtensionDtype): return super().astype(dtype, copy=copy) elif is_integer_dtype(dtype) and self.isna().any(): raise ValueError("Cannot convert float NaN to integer") elif len(self.codes) == 0 or len(self.categories) == 0: result = np.array( self, dtype=dtype, copy=copy, ) else: # GH8628 (PERF): astype category codes instead of astyping array new_cats = self.categories._values try: new_cats = new_cats.astype(dtype=dtype, copy=copy) fill_value = self.categories._na_value if not is_valid_na_for_dtype(fill_value, dtype): fill_value = lib.item_from_zerodim( np.array(self.categories._na_value).astype(dtype) ) except ( TypeError, # downstream error msg for CategoricalIndex is misleading ValueError, ): msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}" raise ValueError(msg) result = take_nd( new_cats, ensure_platform_int(self._codes), fill_value=fill_value ) return result def to_list(self): """ Alias for tolist. """ return self.tolist() def _from_inferred_categories( cls, inferred_categories, inferred_codes, dtype, true_values=None ): """ Construct a Categorical from inferred values. For inferred categories (`dtype` is None) the categories are sorted. For explicit `dtype`, the `inferred_categories` are cast to the appropriate type. Parameters ---------- inferred_categories : Index inferred_codes : Index dtype : CategoricalDtype or 'category' true_values : list, optional If none are provided, the default ones are "True", "TRUE", and "true." Returns ------- Categorical """ from pandas import ( Index, to_datetime, to_numeric, to_timedelta, ) cats = Index(inferred_categories) known_categories = ( isinstance(dtype, CategoricalDtype) and dtype.categories is not None ) if known_categories: # Convert to a specialized type with `dtype` if specified. 
if is_any_real_numeric_dtype(dtype.categories): cats = to_numeric(inferred_categories, errors="coerce") elif is_datetime64_dtype(dtype.categories): cats = to_datetime(inferred_categories, errors="coerce") elif is_timedelta64_dtype(dtype.categories): cats = to_timedelta(inferred_categories, errors="coerce") elif is_bool_dtype(dtype.categories): if true_values is None: true_values = ["True", "TRUE", "true"] # error: Incompatible types in assignment (expression has type # "ndarray", variable has type "Index") cats = cats.isin(true_values) # type: ignore[assignment] if known_categories: # Recode from observation order to dtype.categories order. categories = dtype.categories codes = recode_for_categories(inferred_codes, cats, categories) elif not cats.is_monotonic_increasing: # Sort categories and recode for unknown categories. unsorted = cats.copy() categories = cats.sort_values() codes = recode_for_categories(inferred_codes, unsorted, categories) dtype = CategoricalDtype(categories, ordered=False) else: dtype = CategoricalDtype(cats, ordered=False) codes = inferred_codes return cls(codes, dtype=dtype, fastpath=True) def from_codes( cls, codes, categories=None, ordered=None, dtype: Dtype | None = None ) -> Categorical: """ Make a Categorical type from codes and categories or dtype. This constructor is useful if you already have codes and categories/dtype and so do not need the (computation intensive) factorization step, which is usually done on the constructor. If your data does not follow this convention, please use the normal constructor. Parameters ---------- codes : array-like of int An integer array, where each integer points to a category in categories or dtype.categories, or else is -1 for NaN. categories : index-like, optional The categories for the categorical. Items need to be unique. If the categories are not given here, then they must be provided in `dtype`. ordered : bool, optional Whether or not this categorical is treated as an ordered categorical. If not given here or in `dtype`, the resulting categorical will be unordered. dtype : CategoricalDtype or "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`. Returns ------- Categorical Examples -------- >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True) >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype) ['a', 'b', 'a', 'b'] Categories (2, object): ['a' < 'b'] """ dtype = CategoricalDtype._from_values_or_dtype( categories=categories, ordered=ordered, dtype=dtype ) if dtype.categories is None: msg = ( "The categories must be provided in 'categories' or " "'dtype'. Both were None." ) raise ValueError(msg) if is_extension_array_dtype(codes) and is_integer_dtype(codes): # Avoid the implicit conversion of Int to object if isna(codes).any(): raise ValueError("codes cannot contain NA values") codes = codes.to_numpy(dtype=np.int64) else: codes = np.asarray(codes) if len(codes) and not is_integer_dtype(codes): raise ValueError("codes need to be array-like integers") if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1): raise ValueError("codes need to be between -1 and len(categories)-1") return cls(codes, dtype=dtype, fastpath=True) # ------------------------------------------------------------------ # Categories/Codes/Ordered def categories(self) -> Index: """ The categories of this categorical. Setting assigns new values to each category (effectively a rename of each individual category). The assigned value has to be a list-like object. 
All items must be unique and the number of items in the new categories must be the same as the number of items in the old categories. Raises ------ ValueError If the new categories do not validate as categories or if the number of new categories is unequal the number of old categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. """ return self.dtype.categories def ordered(self) -> Ordered: """ Whether the categories have an ordered relationship. """ return self.dtype.ordered def codes(self) -> np.ndarray: """ The category codes of this categorical. Codes are an array of integers which are the positions of the actual values in the categories array. There is no setter, use the other categorical methods and the normal item setter to change values in the categorical. Returns ------- ndarray[int] A non-writable view of the `codes` array. """ v = self._codes.view() v.flags.writeable = False return v def _set_categories(self, categories, fastpath: bool = False) -> None: """ Sets new categories inplace Parameters ---------- fastpath : bool, default False Don't perform validation of the categories for uniqueness or nulls Examples -------- >>> c = pd.Categorical(['a', 'b']) >>> c ['a', 'b'] Categories (2, object): ['a', 'b'] >>> c._set_categories(pd.Index(['a', 'c'])) >>> c ['a', 'c'] Categories (2, object): ['a', 'c'] """ if fastpath: new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) else: new_dtype = CategoricalDtype(categories, ordered=self.ordered) if ( not fastpath and self.dtype.categories is not None and len(new_dtype.categories) != len(self.dtype.categories) ): raise ValueError( "new categories need to have the same number of " "items as the old categories!" ) super().__init__(self._ndarray, new_dtype) def _set_dtype(self, dtype: CategoricalDtype) -> Categorical: """ Internal method for directly updating the CategoricalDtype Parameters ---------- dtype : CategoricalDtype Notes ----- We don't do any validation here. It's assumed that the dtype is a (valid) instance of `CategoricalDtype`. """ codes = recode_for_categories(self.codes, self.categories, dtype.categories) return type(self)(codes, dtype=dtype, fastpath=True) def set_ordered(self, value: bool) -> Categorical: """ Set the ordered attribute to the boolean value. Parameters ---------- value : bool Set whether this categorical is ordered (True) or not (False). """ new_dtype = CategoricalDtype(self.categories, ordered=value) cat = self.copy() NDArrayBacked.__init__(cat, cat._ndarray, new_dtype) return cat def as_ordered(self) -> Categorical: """ Set the Categorical to be ordered. Returns ------- Categorical Ordered Categorical. """ return self.set_ordered(True) def as_unordered(self) -> Categorical: """ Set the Categorical to be unordered. Returns ------- Categorical Unordered Categorical. """ return self.set_ordered(False) def set_categories(self, new_categories, ordered=None, rename: bool = False): """ Set the categories to the specified new_categories. `new_categories` can include new categories (which will result in unused categories) or remove old categories (which results in values set to NaN). 
If `rename==True`, the categories will simple be renamed (less or more items than in old categories will result in values set to NaN or in unused categories respectively). This method can be used to perform more than one action of adding, removing, and reordering simultaneously and is therefore faster than performing the individual steps via the more specialised methods. On the other hand this methods does not do checks (e.g., whether the old categories are included in the new categories on a reorder), which can result in surprising changes, for example when using special string dtypes, which does not considers a S1 string equal to a single char python string. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, default False Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. rename : bool, default False Whether or not the new_categories should be considered as a rename of the old categories or as reordered categories. Returns ------- Categorical with reordered categories. Raises ------ ValueError If new_categories does not validate as categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. """ if ordered is None: ordered = self.dtype.ordered new_dtype = CategoricalDtype(new_categories, ordered=ordered) cat = self.copy() if rename: if cat.dtype.categories is not None and len(new_dtype.categories) < len( cat.dtype.categories ): # remove all _codes which are larger and set to -1/NaN cat._codes[cat._codes >= len(new_dtype.categories)] = -1 codes = cat._codes else: codes = recode_for_categories( cat.codes, cat.categories, new_dtype.categories ) NDArrayBacked.__init__(cat, codes, new_dtype) return cat def rename_categories(self, new_categories) -> Categorical: """ Rename categories. Parameters ---------- new_categories : list-like, dict-like or callable New categories which will replace old categories. * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. Returns ------- Categorical Categorical with renamed categories. Raises ------ ValueError If new categories are list-like and do not have the same number of items than the current categories or do not validate as categories See Also -------- reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. 
Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) ['A', 'A', 'b'] Categories (2, object): ['A', 'b'] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) ['A', 'A', 'B'] Categories (2, object): ['A', 'B'] """ if is_dict_like(new_categories): new_categories = [ new_categories.get(item, item) for item in self.categories ] elif callable(new_categories): new_categories = [new_categories(item) for item in self.categories] cat = self.copy() cat._set_categories(new_categories) return cat def reorder_categories(self, new_categories, ordered=None): """ Reorder categories as specified in new_categories. `new_categories` need to include all old categories and no new category items. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. Returns ------- Categorical Categorical with reordered categories. Raises ------ ValueError If the new categories do not contain all old category items or any new ones See Also -------- rename_categories : Rename categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. """ if ( len(self.categories) != len(new_categories) or not self.categories.difference(new_categories).empty ): raise ValueError( "items in new_categories are not the same as in old categories" ) return self.set_categories(new_categories, ordered=ordered) def add_categories(self, new_categories) -> Categorical: """ Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. Parameters ---------- new_categories : category or list-like of category The new categories to be included. Returns ------- Categorical Categorical with new categories added. Raises ------ ValueError If the new categories include old categories or do not validate as categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. 
Examples -------- >>> c = pd.Categorical(['c', 'b', 'c']) >>> c ['c', 'b', 'c'] Categories (2, object): ['b', 'c'] >>> c.add_categories(['d', 'a']) ['c', 'b', 'c'] Categories (4, object): ['b', 'c', 'd', 'a'] """ if not is_list_like(new_categories): new_categories = [new_categories] already_included = set(new_categories) & set(self.dtype.categories) if len(already_included) != 0: raise ValueError( f"new categories must not include old categories: {already_included}" ) if hasattr(new_categories, "dtype"): from pandas import Series dtype = find_common_type( [self.dtype.categories.dtype, new_categories.dtype] ) new_categories = Series( list(self.dtype.categories) + list(new_categories), dtype=dtype ) else: new_categories = list(self.dtype.categories) + list(new_categories) new_dtype = CategoricalDtype(new_categories, self.ordered) cat = self.copy() codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories) NDArrayBacked.__init__(cat, codes, new_dtype) return cat def remove_categories(self, removals): """ Remove the specified categories. `removals` must be included in the old categories. Values which were in the removed categories will be set to NaN Parameters ---------- removals : category or list of categories The categories which should be removed. Returns ------- Categorical Categorical with removed categories. Raises ------ ValueError If the removals are not contained in the categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_categories(['d', 'a']) [NaN, 'c', 'b', 'c', NaN] Categories (2, object): ['b', 'c'] """ from pandas import Index if not is_list_like(removals): removals = [removals] removals = Index(removals).unique().dropna() new_categories = self.dtype.categories.difference(removals) not_included = removals.difference(self.dtype.categories) if len(not_included) != 0: not_included = set(not_included) raise ValueError(f"removals must all be in old categories: {not_included}") return self.set_categories(new_categories, ordered=self.ordered, rename=False) def remove_unused_categories(self) -> Categorical: """ Remove categories which are not used. Returns ------- Categorical Categorical with unused categories dropped. See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. set_categories : Set the categories to the specified ones. 
Examples -------- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c[2] = 'a' >>> c[4] = 'c' >>> c ['a', 'c', 'a', 'c', 'c'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_unused_categories() ['a', 'c', 'a', 'c', 'c'] Categories (2, object): ['a', 'c'] """ idx, inv = np.unique(self._codes, return_inverse=True) if idx.size != 0 and idx[0] == -1: # na sentinel idx, inv = idx[1:], inv - 1 new_categories = self.dtype.categories.take(idx) new_dtype = CategoricalDtype._from_fastpath( new_categories, ordered=self.ordered ) new_codes = coerce_indexer_dtype(inv, new_dtype.categories) cat = self.copy() NDArrayBacked.__init__(cat, new_codes, new_dtype) return cat # ------------------------------------------------------------------ def map(self, mapper): """ Map categories using an input mapping or function. Maps the categories to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.Categorical` which has the same order property as the original, otherwise a :class:`~pandas.Index` is returned. NaN values are unaffected. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.Categorical or pandas.Index Mapped categorical. See Also -------- CategoricalIndex.map : Apply a mapping correspondence on a :class:`~pandas.CategoricalIndex`. Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. 
Examples -------- >>> cat = pd.Categorical(['a', 'b', 'c']) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> cat.map(lambda x: x.upper()) ['A', 'B', 'C'] Categories (3, object): ['A', 'B', 'C'] >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}) ['first', 'second', 'third'] Categories (3, object): ['first', 'second', 'third'] If the mapping is one-to-one the ordering of the categories is preserved: >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a' < 'b' < 'c'] >>> cat.map({'a': 3, 'b': 2, 'c': 1}) [3, 2, 1] Categories (3, int64): [3 < 2 < 1] If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> cat.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object') """ new_categories = self.categories.map(mapper) try: return self.from_codes( self._codes.copy(), categories=new_categories, ordered=self.ordered ) except ValueError: # NA values are represented in self._codes with -1 # np.take causes NA values to take final element in new_categories if np.any(self._codes == -1): new_categories = new_categories.insert(len(new_categories), np.nan) return np.take(new_categories, self._codes) __eq__ = _cat_compare_op(operator.eq) __ne__ = _cat_compare_op(operator.ne) __lt__ = _cat_compare_op(operator.lt) __gt__ = _cat_compare_op(operator.gt) __le__ = _cat_compare_op(operator.le) __ge__ = _cat_compare_op(operator.ge) # ------------------------------------------------------------- # Validators; ideally these can be de-duplicated def _validate_setitem_value(self, value): if not is_hashable(value): # wrap scalars and hashable-listlikes in list return self._validate_listlike(value) else: return self._validate_scalar(value) def _validate_scalar(self, fill_value): """ Convert a user-facing fill_value to a representation to use with our underlying ndarray, raising TypeError if this is not possible. Parameters ---------- fill_value : object Returns ------- fill_value : int Raises ------ TypeError """ if is_valid_na_for_dtype(fill_value, self.categories.dtype): fill_value = -1 elif fill_value in self.categories: fill_value = self._unbox_scalar(fill_value) else: raise TypeError( "Cannot setitem on a Categorical with a new " f"category ({fill_value}), set the categories first" ) from None return fill_value # ------------------------------------------------------------- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: """ The numpy array interface. Returns ------- numpy.array A numpy array of either the specified dtype or, if dtype==None (default), the same dtype as categorical.categories.dtype. """ ret = take_nd(self.categories._values, self._codes) if dtype and not is_dtype_equal(dtype, self.categories.dtype): return np.asarray(ret, dtype) # When we're a Categorical[ExtensionArray], like Interval, # we need to ensure __array__ gets all the way to an # ndarray. return np.asarray(ret) def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): # for binary ops, use our custom dunder methods result = ops.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. 
test_numpy_ufuncs_out return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": # e.g. TestCategoricalAnalytics::test_min_max_ordered result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result # for all other cases, raise for now (similarly as what happens in # Series.__array_prepare__) raise TypeError( f"Object with dtype {self.dtype} cannot perform " f"the numpy op {ufunc.__name__}" ) def __setstate__(self, state) -> None: """Necessary for making this object picklable""" if not isinstance(state, dict): return super().__setstate__(state) if "_dtype" not in state: state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"]) if "_codes" in state and "_ndarray" not in state: # backward compat, changed what is property vs attribute state["_ndarray"] = state.pop("_codes") super().__setstate__(state) def nbytes(self) -> int: return self._codes.nbytes + self.dtype.categories.values.nbytes def memory_usage(self, deep: bool = False) -> int: """ Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes """ return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep) def isna(self) -> np.ndarray: """ Detect missing values Missing values (-1 in .codes) are detected. Returns ------- np.ndarray[bool] of whether my values are null See Also -------- isna : Top-level isna. isnull : Alias of isna. Categorical.notna : Boolean inverse of Categorical.isna. """ return self._codes == -1 isnull = isna def notna(self) -> np.ndarray: """ Inverse of isna Both missing values (-1 in .codes) and NA as a category are detected as null. Returns ------- np.ndarray[bool] of whether my values are not null See Also -------- notna : Top-level notna. notnull : Alias of notna. Categorical.isna : Boolean inverse of Categorical.notna. """ return ~self.isna() notnull = notna def value_counts(self, dropna: bool = True) -> Series: """ Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : bool, default True Don't include counts of NaN. 
Returns ------- counts : Series See Also -------- Series.value_counts """ from pandas import ( CategoricalIndex, Series, ) code, cat = self._codes, self.categories ncat, mask = (len(cat), code >= 0) ix, clean = np.arange(ncat), mask.all() if dropna or clean: obs = code if clean else code[mask] count = np.bincount(obs, minlength=ncat or 0) else: count = np.bincount(np.where(mask, code, ncat)) ix = np.append(ix, -1) ix = coerce_indexer_dtype(ix, self.dtype.categories) ix = self._from_backing_data(ix) return Series( count, index=CategoricalIndex(ix), dtype="int64", name="count", copy=False ) # error: Argument 2 of "_empty" is incompatible with supertype # "NDArrayBackedExtensionArray"; supertype defines the argument type as # "ExtensionDtype" def _empty( # type: ignore[override] cls: type_t[Categorical], shape: Shape, dtype: CategoricalDtype ) -> Categorical: """ Analogous to np.empty(shape, dtype=dtype) Parameters ---------- shape : tuple[int] dtype : CategoricalDtype """ arr = cls._from_sequence([], dtype=dtype) # We have to use np.zeros instead of np.empty otherwise the resulting # ndarray may contain codes not supported by this dtype, in which # case repr(result) could segfault. backing = np.zeros(shape, dtype=arr._ndarray.dtype) return arr._from_backing_data(backing) def _internal_get_values(self): """ Return the values. For internal compatibility with pandas formatting. Returns ------- np.ndarray or Index A numpy array of the same dtype as categorical.categories.dtype or Index if datetime / periods. """ # if we are a datetime and period index, return Index to keep metadata if needs_i8_conversion(self.categories.dtype): return self.categories.take(self._codes, fill_value=NaT) elif is_integer_dtype(self.categories) and -1 in self._codes: return self.categories.astype("object").take(self._codes, fill_value=np.nan) return np.array(self) def check_for_ordered(self, op) -> None: """assert that we are ordered""" if not self.ordered: raise TypeError( f"Categorical is not ordered for operation {op}\n" "you can use .as_ordered() to change the " "Categorical to an ordered one\n" ) def argsort( self, *, ascending: bool = True, kind: SortKind = "quicksort", **kwargs ): """ Return the indices that would sort the Categorical. Missing values are sorted at the end. Parameters ---------- ascending : bool, default True Whether the indices should result in an ascending or descending sort. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. **kwargs: passed through to :func:`numpy.argsort`. Returns ------- np.ndarray[np.intp] See Also -------- numpy.ndarray.argsort Notes ----- While an ordering is applied to the category values, arg-sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Examples -------- >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort() array([2, 0, 1, 3]) >>> cat = pd.Categorical(['b', 'b', 'a', 'c'], ... categories=['c', 'b', 'a'], ... ordered=True) >>> cat.argsort() array([3, 0, 1, 2]) Missing values are placed at the end >>> cat = pd.Categorical([2, None, 1]) >>> cat.argsort() array([2, 0, 1]) """ return super().argsort(ascending=ascending, kind=kind, **kwargs) def sort_values( self, *, inplace: Literal[False] = ..., ascending: bool = ..., na_position: str = ..., ) -> Categorical: ... 
def sort_values( self, *, inplace: Literal[True], ascending: bool = ..., na_position: str = ... ) -> None: ... def sort_values( self, *, inplace: bool = False, ascending: bool = True, na_position: str = "last", ) -> Categorical | None: """ Sort the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : bool, default False Do operation in place. ascending : bool, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2, 2, NaN, 5] Categories (2, int64): [2, 5] >>> c.sort_values() [2, 2, 5, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2, 2, 5] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5, 2, 2] Categories (2, int64): [2, 5] """ inplace = validate_bool_kwarg(inplace, "inplace") if na_position not in ["last", "first"]: raise ValueError(f"invalid na_position: {repr(na_position)}") sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) if not inplace: codes = self._codes[sorted_idx] return self._from_backing_data(codes) self._codes[:] = self._codes[sorted_idx] return None def _rank( self, *, axis: AxisInt = 0, method: str = "average", na_option: str = "keep", ascending: bool = True, pct: bool = False, ): """ See Series.rank.__doc__. """ if axis != 0: raise NotImplementedError vff = self._values_for_rank() return algorithms.rank( vff, axis=axis, method=method, na_option=na_option, ascending=ascending, pct=pct, ) def _values_for_rank(self): """ For correctly ranking ordered categorical data. See GH#15420 Ordered categorical data should be ranked on the basis of codes with -1 translated to NaN. 
Returns ------- numpy.array """ from pandas import Series if self.ordered: values = self.codes mask = values == -1 if mask.any(): values = values.astype("float64") values[mask] = np.nan elif is_any_real_numeric_dtype(self.categories): values = np.array(self) else: # reorder the categories (so rank can use the float codes) # instead of passing an object array to rank values = np.array( self.rename_categories( Series(self.categories, copy=False).rank().values ) ) return values # ------------------------------------------------------------------ # NDArrayBackedExtensionArray compat def _codes(self) -> np.ndarray: return self._ndarray def _box_func(self, i: int): if i == -1: return np.NaN return self.categories[i] def _unbox_scalar(self, key) -> int: # searchsorted is very performance sensitive. By converting codes # to same dtype as self.codes, we get much faster performance. code = self.categories.get_loc(key) code = self._ndarray.dtype.type(code) return code # ------------------------------------------------------------------ def __iter__(self) -> Iterator: """ Returns an Iterator over the values of this Categorical. """ if self.ndim == 1: return iter(self._internal_get_values().tolist()) else: return (self[n] for n in range(len(self))) def __contains__(self, key) -> bool: """ Returns True if `key` is in this Categorical. """ # if key is a NaN, check if any NaN is in self. if is_valid_na_for_dtype(key, self.categories.dtype): return bool(self.isna().any()) return contains(self, key, container=self._codes) # ------------------------------------------------------------------ # Rendering Methods def _formatter(self, boxed: bool = False): # Defer to CategoricalFormatter's formatter. return None def _tidy_repr(self, max_vals: int = 10, footer: bool = True) -> str: """ a short repr displaying only max_vals and an optional (but default footer) """ num = max_vals // 2 head = self[:num]._get_repr(length=False, footer=False) tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False) result = f"{head[:-1]}, ..., {tail[1:]}" if footer: result = f"{result}\n{self._repr_footer()}" return str(result) def _repr_categories(self) -> list[str]: """ return the base repr for the categories """ max_categories = ( 10 if get_option("display.max_categories") == 0 else get_option("display.max_categories") ) from pandas.io.formats import format as fmt format_array = partial( fmt.format_array, formatter=None, quoting=QUOTE_NONNUMERIC ) if len(self.categories) > max_categories: num = max_categories // 2 head = format_array(self.categories[:num]) tail = format_array(self.categories[-num:]) category_strs = head + ["..."] + tail else: category_strs = format_array(self.categories) # Strip all leading spaces, which format_array adds for columns... category_strs = [x.strip() for x in category_strs] return category_strs def _repr_categories_info(self) -> str: """ Returns a string representation of the footer. 
""" category_strs = self._repr_categories() dtype = str(self.categories.dtype) levheader = f"Categories ({len(self.categories)}, {dtype}): " width, height = get_terminal_size() max_width = get_option("display.width") or width if console.in_ipython_frontend(): # 0 = no breaks max_width = 0 levstring = "" start = True cur_col_len = len(levheader) # header sep_len, sep = (3, " < ") if self.ordered else (2, ", ") linesep = f"{sep.rstrip()}\n" # remove whitespace for val in category_strs: if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: levstring += linesep + (" " * (len(levheader) + 1)) cur_col_len = len(levheader) + 1 # header + a whitespace elif not start: levstring += sep cur_col_len += len(val) levstring += val start = False # replace to simple save space by return f"{levheader}[{levstring.replace(' < ... < ', ' ... ')}]" def _repr_footer(self) -> str: info = self._repr_categories_info() return f"Length: {len(self)}\n{info}" def _get_repr( self, length: bool = True, na_rep: str = "NaN", footer: bool = True ) -> str: from pandas.io.formats import format as fmt formatter = fmt.CategoricalFormatter( self, length=length, na_rep=na_rep, footer=footer ) result = formatter.to_string() return str(result) def __repr__(self) -> str: """ String representation. """ _maxlen = 10 if len(self._codes) > _maxlen: result = self._tidy_repr(_maxlen) elif len(self._codes) > 0: result = self._get_repr(length=len(self) > _maxlen) else: msg = self._get_repr(length=False, footer=True).replace("\n", ", ") result = f"[], {msg}" return result # ------------------------------------------------------------------ def _validate_listlike(self, value): # NB: here we assume scalar-like tuples have already been excluded value = extract_array(value, extract_numpy=True) # require identical categories set if isinstance(value, Categorical): if not is_dtype_equal(self.dtype, value.dtype): raise TypeError( "Cannot set a Categorical with another, " "without identical categories" ) # is_dtype_equal implies categories_match_up_to_permutation value = self._encode_with_my_categories(value) return value._codes from pandas import Index # tupleize_cols=False for e.g. test_fillna_iterable_category GH#41914 to_add = Index._with_infer(value, tupleize_cols=False).difference( self.categories ) # no assignments of values not in categories, but it's always ok to set # something to np.nan if len(to_add) and not isna(to_add).all(): raise TypeError( "Cannot setitem on a Categorical with a new " "category, set the categories first" ) codes = self.categories.get_indexer(value) return codes.astype(self._ndarray.dtype, copy=False) def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]: """ Compute the inverse of a categorical, returning a dict of categories -> indexers. 
*This is an internal function* Returns ------- Dict[Hashable, np.ndarray[np.intp]] dict of categories -> indexers Examples -------- >>> c = pd.Categorical(list('aabca')) >>> c ['a', 'a', 'b', 'c', 'a'] Categories (3, object): ['a', 'b', 'c'] >>> c.categories Index(['a', 'b', 'c'], dtype='object') >>> c.codes array([0, 0, 1, 2, 0], dtype=int8) >>> c._reverse_indexer() {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} """ categories = self.categories r, counts = libalgos.groupsort_indexer( ensure_platform_int(self.codes), categories.size ) counts = ensure_int64(counts).cumsum() _result = (r[start:end] for start, end in zip(counts, counts[1:])) return dict(zip(categories, _result)) # ------------------------------------------------------------------ # Reductions def min(self, *, skipna: bool = True, **kwargs): """ The minimum value of the object. Only ordered `Categoricals` have a minimum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- min : the minimum of this `Categorical`, NA value if empty """ nv.validate_minmax_axis(kwargs.get("axis", 0)) nv.validate_min((), kwargs) self.check_for_ordered("min") if not len(self._codes): return self.dtype.na_value good = self._codes != -1 if not good.all(): if skipna and good.any(): pointer = self._codes[good].min() else: return np.nan else: pointer = self._codes.min() return self._wrap_reduction_result(None, pointer) def max(self, *, skipna: bool = True, **kwargs): """ The maximum value of the object. Only ordered `Categoricals` have a maximum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- max : the maximum of this `Categorical`, NA if array is empty """ nv.validate_minmax_axis(kwargs.get("axis", 0)) nv.validate_max((), kwargs) self.check_for_ordered("max") if not len(self._codes): return self.dtype.na_value good = self._codes != -1 if not good.all(): if skipna and good.any(): pointer = self._codes[good].max() else: return np.nan else: pointer = self._codes.max() return self._wrap_reduction_result(None, pointer) def _mode(self, dropna: bool = True) -> Categorical: codes = self._codes mask = None if dropna: mask = self.isna() res_codes = algorithms.mode(codes, mask=mask) res_codes = cast(np.ndarray, res_codes) assert res_codes.dtype == codes.dtype res = self._from_backing_data(res_codes) return res # ------------------------------------------------------------------ # ExtensionArray Interface def unique(self): """ Return the ``Categorical`` which ``categories`` and ``codes`` are unique. .. versionchanged:: 1.3.0 Previously, unused categories were dropped from the new categories. Returns ------- Categorical See Also -------- pandas.unique CategoricalIndex.unique Series.unique : Return unique values of Series object. Examples -------- >>> pd.Categorical(list("baabc")).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Categorical(list("baab"), categories=list("abc"), ordered=True).unique() ['b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] """ # pylint: disable=useless-parent-delegation return super().unique() def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray: # make sure we have correct itemsize for resulting codes assert res_values.dtype == self._ndarray.dtype return res_values def equals(self, other: object) -> bool: """ Returns True if categorical arrays are equal. 
Parameters ---------- other : `Categorical` Returns ------- bool """ if not isinstance(other, Categorical): return False elif self._categories_match_up_to_permutation(other): other = self._encode_with_my_categories(other) return np.array_equal(self._codes, other._codes) return False def _concat_same_type( cls: type[CategoricalT], to_concat: Sequence[CategoricalT], axis: AxisInt = 0 ) -> CategoricalT: from pandas.core.dtypes.concat import union_categoricals first = to_concat[0] if axis >= first.ndim: raise ValueError( f"axis {axis} is out of bounds for array of dimension {first.ndim}" ) if axis == 1: # Flatten, concatenate then reshape if not all(x.ndim == 2 for x in to_concat): raise ValueError # pass correctly-shaped to union_categoricals tc_flat = [] for obj in to_concat: tc_flat.extend([obj[:, i] for i in range(obj.shape[1])]) res_flat = cls._concat_same_type(tc_flat, axis=0) result = res_flat.reshape(len(first), -1, order="F") return result result = union_categoricals(to_concat) return result # ------------------------------------------------------------------ def _encode_with_my_categories(self, other: Categorical) -> Categorical: """ Re-encode another categorical using this Categorical's categories. Notes ----- This assumes we have already checked self._categories_match_up_to_permutation(other). """ # Indexing on codes is more efficient if categories are the same, # so we can apply some optimizations based on the degree of # dtype-matching. codes = recode_for_categories( other.codes, other.categories, self.categories, copy=False ) return self._from_backing_data(codes) def _categories_match_up_to_permutation(self, other: Categorical) -> bool: """ Returns True if categoricals are the same dtype same categories, and same ordered Parameters ---------- other : Categorical Returns ------- bool """ return hash(self.dtype) == hash(other.dtype) def describe(self) -> DataFrame: """ Describes this Categorical Returns ------- description: `DataFrame` A dataframe with frequency and counts by category. """ counts = self.value_counts(dropna=False) freqs = counts / counts.sum() from pandas import Index from pandas.core.reshape.concat import concat result = concat([counts, freqs], axis=1) result.columns = Index(["counts", "freqs"]) result.index.name = "categories" return result def isin(self, values) -> npt.NDArray[np.bool_]: """ Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : set or list-like The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- np.ndarray[bool] Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : Equivalent method on Series. Examples -------- >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', ... 'hippo']) >>> s.isin(['cow', 'lama']) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('lama')`` will raise an error. 
        Use a list of one element instead:

        >>> s.isin(['lama'])
        array([ True, False,  True, False,  True, False])
        """
        if not is_list_like(values):
            values_type = type(values).__name__
            raise TypeError(
                "only list-like objects are allowed to be passed "
                f"to isin(), you passed a [{values_type}]"
            )
        values = sanitize_array(values, None, None)
        null_mask = np.asarray(isna(values))
        code_values = self.categories.get_indexer(values)
        code_values = code_values[null_mask | (code_values >= 0)]
        return algorithms.isin(self.codes, code_values)

    def _replace(self, *, to_replace, value, inplace: bool = False):
        from pandas import Index

        inplace = validate_bool_kwarg(inplace, "inplace")
        cat = self if inplace else self.copy()

        mask = isna(np.asarray(value))
        if mask.any():
            removals = np.asarray(to_replace)[mask]
            removals = cat.categories[cat.categories.isin(removals)]
            new_cat = cat.remove_categories(removals)
            NDArrayBacked.__init__(cat, new_cat.codes, new_cat.dtype)

        ser = cat.categories.to_series()
        ser = ser.replace(to_replace=to_replace, value=value)

        all_values = Index(ser)

        # GH51016: maintain order of existing categories
        idxr = cat.categories.get_indexer_for(all_values)
        locs = np.arange(len(ser))
        locs = np.where(idxr == -1, locs, idxr)
        locs = locs.argsort()

        new_categories = ser.take(locs)
        new_categories = new_categories.drop_duplicates(keep="first")
        new_categories = Index(new_categories)
        new_codes = recode_for_categories(
            cat._codes, all_values, new_categories, copy=False
        )
        new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
        NDArrayBacked.__init__(cat, new_codes, new_dtype)

        if not inplace:
            return cat

    # ------------------------------------------------------------------------
    # String methods interface

    def _str_map(
        self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True
    ):
        # Optimization to apply the callable `f` to the categories once
        # and rebuild the result by `take`ing from the result with the codes.
        # Returns the same type as the object-dtype implementation though.
        from pandas.core.arrays import PandasArray

        categories = self.categories
        codes = self.codes
        result = PandasArray(categories.to_numpy())._str_map(f, na_value, dtype)
        return take_nd(result, codes, fill_value=na_value)

    def _str_get_dummies(self, sep: str = "|"):
        # sep may not be in categories. Just bail on this.
        from pandas.core.arrays import PandasArray

        return PandasArray(self.astype(str))._str_get_dummies(sep)


def recode_for_categories(
    codes: np.ndarray, old_categories, new_categories, copy: bool = True
) -> np.ndarray:
    """
    Convert a set of codes to a new set of categories

    Parameters
    ----------
    codes : np.ndarray
    old_categories, new_categories : Index
    copy: bool, default True
        Whether to copy if the codes are unchanged.
    Returns
    -------
    new_codes : np.ndarray of integer dtype
        Recoded codes; ``coerce_indexer_dtype`` picks the smallest fitting
        integer dtype (e.g. ``int8`` below), with ``-1`` marking values
        absent from ``new_categories``.

    Examples
    --------
    >>> old_cat = pd.Index(['b', 'a', 'c'])
    >>> new_cat = pd.Index(['a', 'b'])
    >>> codes = np.array([0, 1, 1, 2])
    >>> recode_for_categories(codes, old_cat, new_cat)
    array([ 1,  0,  0, -1], dtype=int8)
    """
    if len(old_categories) == 0:
        # All null anyway, so just retain the nulls
        if copy:
            return codes.copy()
        return codes
    elif new_categories.equals(old_categories):
        # Same categories, so no need to actually recode
        if copy:
            return codes.copy()
        return codes

    indexer = coerce_indexer_dtype(
        new_categories.get_indexer(old_categories), new_categories
    )
    new_codes = take_nd(indexer, codes, fill_value=-1)
    return new_codes


The provided code snippet includes necessary dependencies for implementing the
`recode_for_groupby` function. Write a Python function
`def recode_for_groupby(c: Categorical, sort: bool, observed: bool) -> tuple[Categorical, Categorical | None]`
to solve the following problem:

Code the categories to ensure we can groupby for categoricals.

If observed=True, we return a new Categorical with the observed categories
only.

If sort=False, return a copy of self, coded with categories as returned by
.unique(), followed by any categories not appearing in the data. If sort=True,
return self.

This method is needed solely to ensure the categorical index of the GroupBy
result has categories in the order of appearance in the data (GH-8868).

Parameters
----------
c : Categorical
sort : bool
    The value of the sort parameter groupby was called with.
observed : bool
    Account only for the observed values

Returns
-------
Categorical
    If sort=False, the new categories are set to the order of appearance in
    codes (unless ordered=True, in which case the original order is
    preserved), followed by any unrepresented categories in the original
    order.
Categorical or None
    If we are observed, return the original categorical, otherwise None

Here is the function:
def recode_for_groupby(
    c: Categorical, sort: bool, observed: bool
) -> tuple[Categorical, Categorical | None]:
    """
    Code the categories to ensure we can groupby for categoricals.

    If observed=True, we return a new Categorical with the observed
    categories only.

    If sort=False, return a copy of self, coded with categories as returned
    by .unique(), followed by any categories not appearing in the data.
    If sort=True, return self.

    This method is needed solely to ensure the categorical index of the
    GroupBy result has categories in the order of appearance in the data
    (GH-8868).

    Parameters
    ----------
    c : Categorical
    sort : bool
        The value of the sort parameter groupby was called with.
    observed : bool
        Account only for the observed values

    Returns
    -------
    Categorical
        If sort=False, the new categories are set to the order of appearance
        in codes (unless ordered=True, in which case the original order is
        preserved), followed by any unrepresented categories in the original
        order.
    Categorical or None
        If we are observed, return the original categorical, otherwise None
    """
    # we only care about observed values
    if observed:
        # In cases with c.ordered, this is equivalent to
        #  return c.remove_unused_categories(), c
        unique_codes = unique1d(c.codes)

        take_codes = unique_codes[unique_codes != -1]
        if sort:
            take_codes = np.sort(take_codes)

        # we recode according to the uniques
        categories = c.categories.take(take_codes)
        codes = recode_for_categories(c.codes, c.categories, categories)

        # return a new categorical that maps our new codes
        # and categories
        dtype = CategoricalDtype(categories, ordered=c.ordered)
        return Categorical(codes, dtype=dtype, fastpath=True), c

    # Already sorted according to c.categories; all is fine
    if sort:
        return c, None

    # sort=False should order groups in as-encountered order (GH-8868)
    # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories
    all_codes = np.arange(c.categories.nunique())
    # GH 38140: exclude nan from indexer for categories
    unique_notnan_codes = unique1d(c.codes[c.codes != -1])
    if sort:
        # no-op here: the sort=True case already returned above
        unique_notnan_codes = np.sort(unique_notnan_codes)
    if len(all_codes) > len(unique_notnan_codes):
        # GH 13179: All categories need to be present, even if missing from the data
        missing_codes = np.setdiff1d(
            all_codes, unique_notnan_codes, assume_unique=True
        )
        take_codes = np.concatenate((unique_notnan_codes, missing_codes))
    else:
        take_codes = unique_notnan_codes

    return Categorical(c, c.unique().categories.take(take_codes)), None
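For illustration, a minimal usage sketch of the function above. This is an
internal helper, not public API; the import path below (its pandas 2.x
location) is an assumption and may change between versions.

# Hypothetical usage sketch of recode_for_groupby (internal pandas API).
import pandas as pd

from pandas.core.groupby.categorical import recode_for_groupby  # assumed path

c = pd.Categorical(["b", "a", "b"], categories=["a", "b", "c"])

# observed=True: only categories present in the data survive, and the
# original categorical is returned alongside the recoded one.
coded, original = recode_for_groupby(c, sort=True, observed=True)
print(list(coded.categories))  # ['a', 'b']
print(original is c)           # True

# sort=False, observed=False: categories follow order of appearance
# ('b', then 'a'), with the unrepresented 'c' appended at the end.
coded, original = recode_for_groupby(c, sort=False, observed=False)
print(list(coded.categories))  # ['b', 'a', 'c']
print(original)                # None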
Code the categories to ensure we can groupby for categoricals.

If observed=True, we return a new Categorical with the observed categories
only.

If sort=False, return a copy of self, coded with categories as returned by
.unique(), followed by any categories not appearing in the data. If sort=True,
return self.

This method is needed solely to ensure the categorical index of the GroupBy
result has categories in the order of appearance in the data (GH-8868).

Parameters
----------
c : Categorical
sort : bool
    The value of the sort parameter groupby was called with.
observed : bool
    Account only for the observed values

Returns
-------
Categorical
    If sort=False, the new categories are set to the order of appearance in
    codes (unless ordered=True, in which case the original order is
    preserved), followed by any unrepresented categories in the original
    order.
Categorical or None
    If we are observed, return the original categorical, otherwise None
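The order-of-appearance behavior this helper guarantees is visible directly
through groupby; a short sketch of the observable effect (output as produced
by pandas 2.x, where an empty integer group sums to 0):

import pandas as pd

df = pd.DataFrame(
    {
        "key": pd.Categorical(["b", "a", "b"], categories=["a", "b", "c"]),
        "val": [1, 2, 3],
    }
)

# sort=False: groups appear in order of first appearance ('b', then 'a'),
# with the unobserved category 'c' kept as an empty group (observed=False).
print(df.groupby("key", sort=False, observed=False)["val"].sum())
# key
# b    4
# a    2
# c    0
# Name: val, dtype: int64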
173,104
from __future__ import annotations from collections import abc from functools import partial from textwrap import dedent from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, Mapping, NamedTuple, Sequence, TypeVar, Union, cast, ) import numpy as np from pandas._libs import ( Interval, lib, reduction as libreduction, ) from pandas._typing import ( ArrayLike, Axis, AxisInt, CorrelationMethod, FillnaOptions, IndexLabel, Manager, Manager2D, SingleManager, TakeIndexer, ) from pandas.errors import SpecificationError from pandas.util._decorators import ( Appender, Substitution, doc, ) from pandas.core.dtypes.common import ( ensure_int64, is_bool, is_categorical_dtype, is_dict_like, is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import algorithms from pandas.core.apply import ( GroupByApply, maybe_mangle_lambdas, reconstruct_func, validate_func_kwargs, ) import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.groupby import base from pandas.core.groupby.groupby import ( GroupBy, GroupByPlot, _agg_template, _apply_docs, _transform_template, ) from pandas.core.indexes.api import ( Index, MultiIndex, all_indexes_same, default_index, ) from pandas.core.series import Series from pandas.core.util.numba_ import maybe_use_numba from pandas.plotting import boxplot_frame_groupby class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. 
Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns 
if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the 
dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. """ from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. 
""" mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. 
val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. 
%(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. 
Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. 
Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. >>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. 
If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. 
Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. 
.. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. 
_console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... 
dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... {'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if 
any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. 
# # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. 
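For example, ``df.to_stata("data.dta", version=118)`` writes a Unicode-capable file that Stata 14 and later can read (the filename is illustrative; ``version`` and the other options are described below).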
Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
        >>> print(df.to_markdown(tablefmt="grid"))
        +----+------------+------------+
        |    | animal_1   | animal_2   |
        +====+============+============+
        |  0 | elk        | dog        |
        +----+------------+------------+
        |  1 | pig        | quetzal    |
        +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    @overload
    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    @overload
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    @doc(storage_options=_shared_docs["storage_options"])
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If None, the result is
            returned as bytes. If a string or path, it will be used as Root Directory
            path when writing a partitioned dataset.

            .. versionchanged:: 1.2.0

            Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
        table_id : str, optional
            A css id is included in the opening `<table>` tag if specified.
        render_links : bool, default False
            Convert URLs to HTML links.
        encoding : str, default "utf-8"
            Set character encoding.

            .. versionadded:: 1.0
        %(returns)s
        See Also
        --------
        to_string : Convert DataFrame to a string.
        """
        if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
            raise ValueError("Invalid value for justify parameter")

        formatter = fmt.DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            header=header,
            index=index,
            formatters=formatters,
            float_format=float_format,
            bold_rows=bold_rows,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            escape=escape,
            decimal=decimal,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
        )
        # TODO: a generic formatter would be in DataFrameFormatter
        return fmt.DataFrameRenderer(formatter).to_html(
            buf=buf,
            classes=classes,
            notebook=notebook,
            border=border,
            encoding=encoding,
            table_id=table_id,
            render_links=render_links,
        )

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path_or_buffer",
    )
    def to_xml(
        self,
        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        index: bool = True,
        root_name: str | None = "data",
        row_name: str | None = "row",
        na_rep: str | None = None,
        attr_cols: list[str] | None = None,
        elem_cols: list[str] | None = None,
        namespaces: dict[str | None, str] | None = None,
        prefix: str | None = None,
        encoding: str = "utf-8",
        xml_declaration: bool | None = True,
        pretty_print: bool | None = True,
        parser: str | None = "lxml",
        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Render a DataFrame to an XML document.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        path_or_buffer : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a ``write()`` function. If None, the result is
            returned as a string.
        index : bool, default True
            Whether to include index in XML document.
        root_name : str, default 'data'
            The name of root element in XML document.
        row_name : str, default 'row'
            The name of row element in XML document.
        na_rep : str, optional
            Missing data representation.
        attr_cols : list-like, optional
            List of columns to write as attributes in row element.
            Hierarchical columns will be flattened with underscore
            delimiting the different levels.
        elem_cols : list-like, optional
            List of columns to write as children in row element. By default,
            all columns output as children of row element. Hierarchical
            columns will be flattened with underscore delimiting the
            different levels.
        namespaces : dict, optional
            All namespaces to be defined in root element. Keys of dict
            should be prefix names and values of dict corresponding URIs.
            Default namespaces should be given empty string key. For
            example, ::

                namespaces = {{"": "https://example.com"}}

        prefix : str, optional
            Namespace prefix to be used for every element and/or attribute
            in document. This should be one of the keys in ``namespaces``
            dict.
        encoding : str, default 'utf-8'
            Encoding of the resulting document.
        xml_declaration : bool, default True
            Whether to include the XML declaration at start of document.
        pretty_print : bool, default True
            Whether output should be pretty printed with indentation and
            line breaks.
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
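# e.g. ``df[df["a"] > 0]`` with a boolean Series, or ``df[mask]`` with a
# boolean ndarray/list (column name "a" is illustrative); a Series key is
# first aligned to self.index inside _getitem_bool_array (via
# check_bool_indexer) before the matching rows are taken.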
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
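Examples
--------
A minimal sketch (``loc`` is a zero-based column position; the frame below
is illustrative):

>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.isetitem(1, [30, 40])
>>> df
   A   B
0  1  30
1  2  40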
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
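For example, ``df["new"] = [1, 2, 3]`` on a three-row frame is routed here by ``__setitem__``: the list is sanitized into an index-aligned array before being handed to ``_set_item_mgr``.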
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
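        For example, ``df['x'] = [1, 2, 3]`` routes the plain list through this
        method, where it is length-checked against ``df.index`` and copied into
        a new array (illustrative; Series and dict-like inputs are instead
        reindexed to the frame's index).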
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns')
           I  II
        0  1   4
        1  2   5
        2  3   6
        """

    # ----------------------------------------------------------------------
    # Reindex-based selection methods

    # ----------------------------------------------------------------------
    # Sorting

    # ----------------------------------------------------------------------
    # Arithmetic Methods

    # ----------------------------------------------------------------------
    # Function application

    # ----------------------------------------------------------------------
    # Merging / joining methods

    # ----------------------------------------------------------------------
    # Statistical methods, etc.

    # ----------------------------------------------------------------------
    # ndarray-like stats methods

    # ----------------------------------------------------------------------
    # Add index and columns

    # ----------------------------------------------------------------------
    # Add plotting methods to DataFrame

    # ----------------------------------------------------------------------
    # Internal Interface Methods


class Series(base.IndexOpsMixin, NDFrame):  # type: ignore[misc]
    """
    One-dimensional ndarray with axis labels (including time series).

    Labels need not be unique but must be a hashable type. The object
    supports both integer- and label-based indexing and provides a host of
    methods for performing operations involving the index. Statistical
    methods from ndarray have been overridden to automatically exclude
    missing data (currently represented as NaN).

    Operations between Series (+, -, /, \\*, \\*\\*) align values based on their
    associated index values-- they need not be the same length. The result
    index will be the sorted union of the two indexes.

    Parameters
    ----------
    data : array-like, Iterable, dict, or scalar value
        Contains data stored in Series. If data is a dict, argument order is
        maintained.
    index : array-like or Index (1d)
        Values must be hashable and have the same length as `data`.
        Non-unique index values are allowed. Will default to
        RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like
        and index is None, then the keys in the data are used as the index.
        If the index is not None, the resulting Series is reindexed with the
        index values.
    dtype : str, numpy.dtype, or ExtensionDtype, optional
        Data type for the output Series. If not specified, this will be
        inferred from `data`.
        See the :ref:`user guide <basics.dtypes>` for more usages.
    name : Hashable, default None
        The name to give to the Series.
    copy : bool, default False
        Copy input data. Only affects Series or 1d ndarray input. See examples.

    Notes
    -----
    Please reference the :ref:`User Guide <basics.series>` for more information.

    Examples
    --------
    Constructing Series from a dictionary with an Index specified

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
    >>> ser
    a   1
    b   2
    c   3
    dtype: int64

    The keys of the dictionary match with the Index values, hence the Index
    values have no effect.
>>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
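            # e.g. pd.Series([]) now infers object dtype rather than float64
            # (illustrative of the change this branch implements)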
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
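        Examples
        --------
        A minimal example (the values are arbitrary):

        >>> pd.Series([1, 2, 3]).ravel()
        array([1, 2, 3])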
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
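        For example, a scalar lookup such as ``ser.at[label]`` is expected to be
        routed through this method with ``takeable=False``, while ``ser.iat[i]``
        uses ``takeable=True`` (illustrative description of the indexing path).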
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
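        # Illustrative case: ser[['a', 'b']] = 0, where the list of labels is not
        # hashable and therefore raised InvalidIndexError in _set_with_engine.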
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
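        Examples
        --------
        A minimal illustrative example (small made-up data, assuming the
        default ``dropna=True``):

        >>> s = pd.Series([2, 4, 2, 2, 4, None])
        >>> s.mode()
        0    2.0
        dtype: float64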
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
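        A minimal illustrative example (made-up values; the result matches
        ``Series.dot``):

        >>> s = pd.Series([1, 2, 3])
        >>> s @ s
        14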
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
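# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pandas source): a minimal, self-
# contained exercise of the Series.map / Series.aggregate behaviour
# documented above. The sample values are assumptions chosen only for
# demonstration.
import numpy as np
import pandas as pd

s = pd.Series(["cat", "dog", np.nan, "rabbit"])

# dict mapping: values whose keys are absent from the dict become NaN
print(s.map({"cat": "kitten", "dog": "puppy"}))

# na_action="ignore" propagates NaN instead of passing it to the mapper
print(s.map("I am a {}".format, na_action="ignore"))

# aggregate: a single function name reduces to a scalar, a list of names
# returns one labelled entry per function
nums = pd.Series([1, 2, 3, 4])
print(nums.agg("min"))            # 1
print(nums.agg(["min", "max"]))   # min -> 1, max -> 4
# ---------------------------------------------------------------------------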
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series def concat( objs: Iterable[DataFrame] | Mapping[HashableT, DataFrame], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame: ... def concat( objs: Iterable[Series] | Mapping[HashableT, Series], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> Series: ... 
def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame | Series: ... def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Literal[1, "columns"], join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame: ... def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Axis = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame | Series: ... def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Axis = 0, join: str = "outer", ignore_index: bool = False, keys=None, levels=None, names=None, verify_integrity: bool = False, sort: bool = False, copy: bool | None = None, ) -> DataFrame | Series: """ Concatenate pandas objects along a particular axis. Allows optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ---------- objs : a sequence or mapping of Series or DataFrame objects If a mapping is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default False Sort non-concatenation axis if it is not already aligned. copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. 
A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__. It is not recommended to build DataFrames by adding single rows in a for loop. Build a list of rows and make a DataFrame in a single concat. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. >>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] Append a single row to the end of a ``DataFrame`` object. 
>>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0]) >>> df7 a b 0 1 2 >>> new_row = pd.Series({'a': 3, 'b': 4}) >>> new_row a 3 b 4 dtype: int64 >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True) a b 0 1 2 1 3 4 """ if copy is None: if using_copy_on_write(): copy = False else: copy = True elif copy and using_copy_on_write(): copy = False op = _Concatenator( objs, axis=axis, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, copy=copy, sort=sort, ) return op.get_result() def _wrap_transform_general_frame( obj: DataFrame, group: DataFrame, res: DataFrame | Series ) -> DataFrame: from pandas import concat if isinstance(res, Series): # we need to broadcast across the # other dimension; this will preserve dtypes # GH14457 if res.index.is_(obj.index): res_frame = concat([res] * len(group.columns), axis=1) res_frame.columns = group.columns res_frame.index = group.index else: res_frame = obj._constructor( np.tile(res.values, (len(group.index), 1)), columns=group.columns, index=group.index, ) assert isinstance(res_frame, DataFrame) return res_frame elif isinstance(res, DataFrame) and not res.index.is_(group.index): return res._align_frame(group)[0] else: return res
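# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pandas source): the broadcasting step
# used by _wrap_transform_general_frame above. When a groupwise transform
# returns a Series whose index is not the group's own index, the values are
# tiled across every row of the group and relabelled with the group's
# columns. The small frame below is an assumption made up for demonstration.
import numpy as np
import pandas as pd

group = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]}, index=[10, 11])
res = pd.Series([0.5, 1.5])  # one value per column, index foreign to `group`

# equivalent of the np.tile branch: repeat the result row for every row of
# the group while keeping the group's own row and column labels
res_frame = pd.DataFrame(
    np.tile(res.values, (len(group.index), 1)),
    columns=group.columns,
    index=group.index,
)
print(res_frame)  # both rows 10 and 11 carry a=0.5, b=1.5
# ---------------------------------------------------------------------------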
from __future__ import annotations import datetime from functools import ( partial, wraps, ) import inspect from textwrap import dedent from typing import ( TYPE_CHECKING, Callable, Hashable, Iterable, Iterator, List, Literal, Mapping, Sequence, TypeVar, Union, cast, final, ) import warnings import numpy as np from pandas._config.config import option_context from pandas._libs import ( Timestamp, lib, ) from pandas._libs.algos import rank_1d import pandas._libs.groupby as libgroupby from pandas._libs.missing import NA from pandas._typing import ( AnyArrayLike, ArrayLike, Axis, AxisInt, DtypeObj, FillnaOptions, IndexLabel, NDFrameT, PositionalIndexer, RandomState, Scalar, T, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, DataError, ) from pandas.util._decorators import ( Appender, Substitution, cache_readonly, doc, ) from pandas.core.dtypes.cast import ensure_dtype_can_hold_na from pandas.core.dtypes.common import ( is_bool_dtype, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, needs_i8_conversion, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms, sample, ) from pandas.core._numba import executor from pandas.core.arrays import ( BaseMaskedArray, BooleanArray, Categorical, DatetimeArray, ExtensionArray, FloatingArray, TimedeltaArray, ) from pandas.core.base import ( PandasObject, SelectionMixin, ) import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame from pandas.core.groupby import ( base, numba_, ops, ) from pandas.core.groupby.grouper import get_grouper from pandas.core.groupby.indexing import ( GroupByIndexingMixin, GroupByNthSelector, ) from pandas.core.indexes.api import ( CategoricalIndex, Index, MultiIndex, RangeIndex, default_index, ) from pandas.core.internals.blocks import ensure_block_shape from pandas.core.series import Series from pandas.core.sorting import get_group_index_sorter from pandas.core.util.numba_ import ( get_jit_arguments, maybe_use_numba, ) _KeysArgType = Union[ Hashable, List[Hashable], Callable[[Hashable], Hashable], List[Callable[[Hashable], Hashable]], Mapping[Hashable, Hashable], ] class GroupBy(BaseGroupBy[NDFrameT]): """ Class for grouping and aggregating relational data. See aggregate, transform, and apply functions on this object. It's easiest to use obj.groupby(...) to use GroupBy, but you can also do: :: grouped = groupby(obj, ...) Parameters ---------- obj : pandas object axis : int, default 0 level : int, default None Level of MultiIndex groupings : list of Grouping objects Most users should ignore this exclusions : array-like, optional List of columns to exclude name : str Most users should ignore this Returns ------- **Attributes** groups : dict {group name -> group labels} len(grouped) : int Number of groups Notes ----- After grouping, see aggregate, apply, and transform functions. Here are some other brief notes about usage. When grouping by multiple groups, the result index will be a MultiIndex (hierarchical) by default. Iteration produces (key, group) tuples, i.e. chunking the data by group. So you can write code like: :: grouped = obj.groupby(keys, axis=axis) for key, group in grouped: # do something with the data Function calls on GroupBy, if not specially implemented, "dispatch" to the grouped data. 
So if you group a DataFrame and wish to invoke the std() method on each group, you can simply do: :: df.groupby(mapper).std() rather than :: df.groupby(mapper).aggregate(np.std) You can pass arguments to these "wrapped" functions, too. See the online documentation for full exposition on these topics and much more """ grouper: ops.BaseGrouper as_index: bool def __init__( self, obj: NDFrameT, keys: _KeysArgType | None = None, axis: Axis = 0, level: IndexLabel | None = None, grouper: ops.BaseGrouper | None = None, exclusions: frozenset[Hashable] | None = None, selection: IndexLabel | None = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> None: self._selection = selection assert isinstance(obj, NDFrame), type(obj) self.level = level if not as_index: if axis != 0: raise ValueError("as_index=False only valid for axis=0") self.as_index = as_index self.keys = keys self.sort = sort self.group_keys = group_keys self.observed = observed self.dropna = dropna if grouper is None: grouper, exclusions, obj = get_grouper( obj, keys, axis=axis, level=level, sort=sort, observed=observed, dropna=self.dropna, ) self.obj = obj self.axis = obj._get_axis_number(axis) self.grouper = grouper self.exclusions = frozenset(exclusions) if exclusions else frozenset() def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self.obj: return self[attr] raise AttributeError( f"'{type(self).__name__}' object has no attribute '{attr}'" ) def _op_via_apply(self, name: str, *args, **kwargs): """Compute the result of an operation by using GroupBy's apply.""" f = getattr(type(self._obj_with_exclusions), name) sig = inspect.signature(f) # a little trickery for aggregation functions that need an axis # argument if "axis" in sig.parameters: if kwargs.get("axis", None) is None or kwargs.get("axis") is lib.no_default: kwargs["axis"] = self.axis def curried(x): return f(x, *args, **kwargs) # preserve the name so we can detect it when calling plot methods, # to avoid duplicates curried.__name__ = name # special case otherwise extra plots are created when catching the # exception below if name in base.plotting_methods: return self.apply(curried) is_transform = name in base.transformation_kernels result = self._python_apply_general( curried, self._obj_with_exclusions, is_transform=is_transform, not_indexed_same=not is_transform, ) if self.grouper.has_dropped_na and is_transform: # result will have dropped rows due to nans, fill with null # and ensure index is ordered same as the input result = self._set_result_index_ordered(result) return result # ----------------------------------------------------------------- # Selection def _iterate_slices(self) -> Iterable[Series]: raise AbstractMethodError(self) # ----------------------------------------------------------------- # Dispatch/Wrapping def _concat_objects( self, values, not_indexed_same: bool = False, is_transform: bool = False, ): from pandas.core.reshape.concat import concat if self.group_keys and not is_transform: if self.as_index: # possible MI return case group_keys = self.grouper.result_index group_levels = self.grouper.levels group_names = self.grouper.names result = concat( values, axis=self.axis, keys=group_keys, levels=group_levels, names=group_names, sort=False, ) else: # GH5610, returns a MI, with the first level being a # range index keys = list(range(len(values))) result = concat(values, axis=self.axis, keys=keys) elif not 
not_indexed_same: result = concat(values, axis=self.axis) ax = self._selected_obj._get_axis(self.axis) if self.dropna: labels = self.grouper.group_info[0] mask = labels != -1 ax = ax[mask] # this is a very unfortunate situation # we can't use reindex to restore the original order # when the ax has duplicates # so we resort to this # GH 14776, 30667 # TODO: can we re-use e.g. _reindex_non_unique? if ax.has_duplicates and not result.axes[self.axis].equals(ax): # e.g. test_category_order_transformer target = algorithms.unique1d(ax._values) indexer, _ = result.index.get_indexer_non_unique(target) result = result.take(indexer, axis=self.axis) else: result = result.reindex(ax, axis=self.axis, copy=False) else: result = concat(values, axis=self.axis) name = self.obj.name if self.obj.ndim == 1 else self._selection if isinstance(result, Series) and name is not None: result.name = name return result def _set_result_index_ordered( self, result: OutputFrameOrSeries ) -> OutputFrameOrSeries: # set the result index on the passed values object and # return the new object, xref 8046 obj_axis = self.obj._get_axis(self.axis) if self.grouper.is_monotonic and not self.grouper.has_dropped_na: # shortcut if we have an already ordered grouper result = result.set_axis(obj_axis, axis=self.axis, copy=False) return result # row order is scrambled => sort the rows by position in original index original_positions = Index(self.grouper.result_ilocs()) result = result.set_axis(original_positions, axis=self.axis, copy=False) result = result.sort_index(axis=self.axis) if self.grouper.has_dropped_na: # Add back in any missing rows due to dropna - index here is integral # with values referring to the row of the input so can use RangeIndex result = result.reindex(RangeIndex(len(obj_axis)), axis=self.axis) result = result.set_axis(obj_axis, axis=self.axis, copy=False) return result def _insert_inaxis_grouper(self, result: Series | DataFrame) -> DataFrame: if isinstance(result, Series): result = result.to_frame() # zip in reverse so we can always insert at loc 0 columns = result.columns for name, lev, in_axis in zip( reversed(self.grouper.names), reversed(self.grouper.get_group_levels()), reversed([grp.in_axis for grp in self.grouper.groupings]), ): # GH #28549 # When using .apply(-), name will be in columns already if in_axis and name not in columns: result.insert(0, name, lev) return result def _indexed_output_to_ndframe( self, result: Mapping[base.OutputKey, ArrayLike] ) -> Series | DataFrame: raise AbstractMethodError(self) def _maybe_transpose_result(self, result: NDFrameT) -> NDFrameT: if self.axis == 1: # Only relevant for DataFrameGroupBy, no-op for SeriesGroupBy result = result.T if result.index.equals(self.obj.index): # Retain e.g. DatetimeIndex/TimedeltaIndex freq # e.g. test_groupby_crash_on_nunique result.index = self.obj.index.copy() return result def _wrap_aggregated_output( self, result: Series | DataFrame, qs: npt.NDArray[np.float64] | None = None, ): """ Wraps the output of GroupBy aggregations into the expected result. 
Parameters ---------- result : Series, DataFrame Returns ------- Series or DataFrame """ # ATM we do not get here for SeriesGroupBy; when we do, we will # need to require that result.name already match self.obj.name if not self.as_index: # `not self.as_index` is only relevant for DataFrameGroupBy, # enforced in __init__ result = self._insert_inaxis_grouper(result) result = result._consolidate() index = Index(range(self.grouper.ngroups)) else: index = self.grouper.result_index if qs is not None: # We get here with len(qs) != 1 and not self.as_index # in test_pass_args_kwargs index = _insert_quantile_level(index, qs) result.index = index # error: Argument 1 to "_maybe_transpose_result" of "GroupBy" has # incompatible type "Union[Series, DataFrame]"; expected "NDFrameT" res = self._maybe_transpose_result(result) # type: ignore[arg-type] return self._reindex_output(res, qs=qs) def _wrap_applied_output( self, data, values: list, not_indexed_same: bool = False, is_transform: bool = False, ): raise AbstractMethodError(self) # ----------------------------------------------------------------- # numba def _numba_prep(self, data: DataFrame): ids, _, ngroups = self.grouper.group_info sorted_index = get_group_index_sorter(ids, ngroups) sorted_ids = algorithms.take_nd(ids, sorted_index, allow_fill=False) sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() if len(self.grouper.groupings) > 1: raise NotImplementedError( "More than 1 grouping labels are not supported with engine='numba'" ) # GH 46867 index_data = data.index if isinstance(index_data, MultiIndex): group_key = self.grouper.groupings[0].name index_data = index_data.get_level_values(group_key) sorted_index_data = index_data.take(sorted_index).to_numpy() starts, ends = lib.generate_slices(sorted_ids, ngroups) return ( starts, ends, sorted_index_data, sorted_data, ) def _numba_agg_general( self, func: Callable, engine_kwargs: dict[str, bool] | None, *aggregator_args, ): """ Perform groupby with a standard numerical aggregation function (e.g. mean) with Numba. """ if not self.as_index: raise NotImplementedError( "as_index=False is not supported. Use .reset_index() instead." ) if self.axis == 1: raise NotImplementedError("axis=1 is not supported.") data = self._obj_with_exclusions df = data if data.ndim == 2 else data.to_frame() starts, ends, sorted_index, sorted_data = self._numba_prep(df) aggregator = executor.generate_shared_aggregator( func, **get_jit_arguments(engine_kwargs) ) result = aggregator(sorted_data, starts, ends, 0, *aggregator_args) index = self.grouper.result_index if data.ndim == 1: result_kwargs = {"name": data.name} result = result.ravel() else: result_kwargs = {"columns": data.columns} return data._constructor(result, index=index, **result_kwargs) def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs): """ Perform groupby transform routine with the numba engine. This routine mimics the data splitting routine of the DataSplitter class to generate the indices of each group in the sorted data and then passes the data and indices into a Numba jitted function. 
""" data = self._obj_with_exclusions df = data if data.ndim == 2 else data.to_frame() starts, ends, sorted_index, sorted_data = self._numba_prep(df) numba_.validate_udf(func) numba_transform_func = numba_.generate_numba_transform_func( func, **get_jit_arguments(engine_kwargs, kwargs) ) result = numba_transform_func( sorted_data, sorted_index, starts, ends, len(df.columns), *args, ) # result values needs to be resorted to their original positions since we # evaluated the data sorted by group result = result.take(np.argsort(sorted_index), axis=0) index = data.index if data.ndim == 1: result_kwargs = {"name": data.name} result = result.ravel() else: result_kwargs = {"columns": data.columns} return data._constructor(result, index=index, **result_kwargs) def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs): """ Perform groupby aggregation routine with the numba engine. This routine mimics the data splitting routine of the DataSplitter class to generate the indices of each group in the sorted data and then passes the data and indices into a Numba jitted function. """ data = self._obj_with_exclusions df = data if data.ndim == 2 else data.to_frame() starts, ends, sorted_index, sorted_data = self._numba_prep(df) numba_.validate_udf(func) numba_agg_func = numba_.generate_numba_agg_func( func, **get_jit_arguments(engine_kwargs, kwargs) ) result = numba_agg_func( sorted_data, sorted_index, starts, ends, len(df.columns), *args, ) index = self.grouper.result_index if data.ndim == 1: result_kwargs = {"name": data.name} result = result.ravel() else: result_kwargs = {"columns": data.columns} res = data._constructor(result, index=index, **result_kwargs) if not self.as_index: res = self._insert_inaxis_grouper(res) res.index = default_index(len(res)) return res # ----------------------------------------------------------------- # apply/agg/transform _apply_docs["template"].format( input="dataframe", examples=_apply_docs["dataframe_examples"] ) ) def apply(self, func, *args, **kwargs) -> NDFrameT: func = com.is_builtin_func(func) if isinstance(func, str): if hasattr(self, func): res = getattr(self, func) if callable(res): return res(*args, **kwargs) elif args or kwargs: raise ValueError(f"Cannot pass arguments to property {func}") return res else: raise TypeError(f"apply func should be callable, not '{func}'") elif args or kwargs: if callable(func): def f(g): with np.errstate(all="ignore"): return func(g, *args, **kwargs) else: raise ValueError( "func must be a callable if args or kwargs are supplied" ) else: f = func # ignore SettingWithCopy here in case the user mutates with option_context("mode.chained_assignment", None): try: result = self._python_apply_general(f, self._selected_obj) except TypeError: # gh-20949 # try again, with .apply acting as a filtering # operation, by excluding the grouping column # This would normally not be triggered # except if the udf is trying an operation that # fails on *some* columns, e.g. a numeric operation # on a string grouper column return self._python_apply_general(f, self._obj_with_exclusions) return result def _python_apply_general( self, f: Callable, data: DataFrame | Series, not_indexed_same: bool | None = None, is_transform: bool = False, is_agg: bool = False, ) -> NDFrameT: """ Apply function f in python space Parameters ---------- f : callable Function to apply data : Series or DataFrame Data to apply f to not_indexed_same: bool, optional When specified, overrides the value of not_indexed_same. 
Apply behaves differently when the result index is equal to the input index, but this can be coincidental leading to value-dependent behavior. is_transform : bool, default False Indicator for whether the function is actually a transform and should not have group keys prepended. is_agg : bool, default False Indicator for whether the function is an aggregation. When the result is empty, we don't want to warn for this case. See _GroupBy._python_agg_general. Returns ------- Series or DataFrame data after applying f """ values, mutated = self.grouper.apply(f, data, self.axis) if not_indexed_same is None: not_indexed_same = mutated return self._wrap_applied_output( data, values, not_indexed_same, is_transform, ) def _agg_general( self, numeric_only: bool = False, min_count: int = -1, *, alias: str, npfunc: Callable, ): result = self._cython_agg_general( how=alias, alt=npfunc, numeric_only=numeric_only, min_count=min_count, ) return result.__finalize__(self.obj, method="groupby") def _agg_py_fallback( self, values: ArrayLike, ndim: int, alt: Callable ) -> ArrayLike: """ Fallback to pure-python aggregation if _cython_operation raises NotImplementedError. """ # We get here with a) EADtypes and b) object dtype assert alt is not None if values.ndim == 1: # For DataFrameGroupBy we only get here with ExtensionArray ser = Series(values, copy=False) else: # We only get here with values.dtype == object # TODO: special case not needed with ArrayManager df = DataFrame(values.T) # bc we split object blocks in grouped_reduce, we have only 1 col # otherwise we'd have to worry about block-splitting GH#39329 assert df.shape[1] == 1 # Avoid call to self.values that can occur in DataFrame # reductions; see GH#28949 ser = df.iloc[:, 0] # We do not get here with UDFs, so we know that our dtype # should always be preserved by the implemented aggregations # TODO: Is this exactly right; see WrappedCythonOp get_result_dtype? res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True) if isinstance(values, Categorical): # Because we only get here with known dtype-preserving # reductions, we cast back to Categorical. # TODO: if we ever get "rank" working, exclude it here. res_values = type(values)._from_sequence(res_values, dtype=values.dtype) elif ser.dtype == object: res_values = res_values.astype(object, copy=False) # If we are DataFrameGroupBy and went through a SeriesGroupByPath # then we need to reshape # GH#32223 includes case with IntegerArray values, ndarray res_values # test_groupby_duplicate_columns with object dtype values return ensure_block_shape(res_values, ndim=ndim) def _cython_agg_general( self, how: str, alt: Callable, numeric_only: bool = False, min_count: int = -1, **kwargs, ): # Note: we never get here with how="ohlc" for DataFrameGroupBy; # that goes through SeriesGroupBy data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how) def array_func(values: ArrayLike) -> ArrayLike: try: result = self.grouper._cython_operation( "aggregate", values, how, axis=data.ndim - 1, min_count=min_count, **kwargs, ) except NotImplementedError: # generally if we have numeric_only=False # and non-applicable functions # try to python agg # TODO: shouldn't min_count matter? 
result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt) return result new_mgr = data.grouped_reduce(array_func) res = self._wrap_agged_manager(new_mgr) out = self._wrap_aggregated_output(res) if self.axis == 1: out = out.infer_objects(copy=False) return out def _cython_transform( self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs ): raise AbstractMethodError(self) def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): return self._transform_with_numba( func, *args, engine_kwargs=engine_kwargs, **kwargs ) # optimized transforms func = com.get_cython_func(func) or func if not isinstance(func, str): return self._transform_general(func, *args, **kwargs) elif func not in base.transform_kernel_allowlist: msg = f"'{func}' is not a valid function name for transform(name)" raise ValueError(msg) elif func in base.cythonized_kernels or func in base.transformation_kernels: # cythonized transform or canned "agg+broadcast" return getattr(self, func)(*args, **kwargs) else: # i.e. func in base.reduction_kernels # GH#30918 Use _transform_fast only when we know func is an aggregation # If func is a reduction, we need to broadcast the # result to the whole group. Compute func result # and deal with possible broadcasting below. # Temporarily set observed for dealing with categoricals. with com.temp_setattr(self, "observed", True): with com.temp_setattr(self, "as_index", True): # GH#49834 - result needs groups in the index for # _wrap_transform_fast_result result = getattr(self, func)(*args, **kwargs) return self._wrap_transform_fast_result(result) def _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT: """ Fast transform path for aggregations. """ obj = self._obj_with_exclusions # for each col, reshape to size of original frame by take operation ids, _, _ = self.grouper.group_info result = result.reindex(self.grouper.result_index, axis=self.axis, copy=False) if self.obj.ndim == 1: # i.e. SeriesGroupBy out = algorithms.take_nd(result._values, ids) output = obj._constructor(out, index=obj.index, name=obj.name) else: # `.size()` gives Series output on DataFrame input, need axis 0 axis = 0 if result.ndim == 1 else self.axis # GH#46209 # Don't convert indices: negative indices need to give rise # to null values in the result output = result._take(ids, axis=axis, convert_indices=False) output = output.set_axis(obj._get_axis(self.axis), axis=axis) return output # ----------------------------------------------------------------- # Utilities def _apply_filter(self, indices, dropna): if len(indices) == 0: indices = np.array([], dtype="int64") else: indices = np.sort(np.concatenate(indices)) if dropna: filtered = self._selected_obj.take(indices, axis=self.axis) else: mask = np.empty(len(self._selected_obj.index), dtype=bool) mask.fill(False) mask[indices.astype(int)] = True # mask fails to broadcast when passed to where; broadcast manually. mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T filtered = self._selected_obj.where(mask) # Fill with NaNs. return filtered def _cumcount_array(self, ascending: bool = True) -> np.ndarray: """ Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. 
Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general """ ids, _, ngroups = self.grouper.group_info sorter = get_group_index_sorter(ids, ngroups) ids, count = ids[sorter], len(ids) if count == 0: return np.empty(0, dtype=np.int64) run = np.r_[True, ids[:-1] != ids[1:]] rep = np.diff(np.r_[np.nonzero(run)[0], count]) out = (~run).cumsum() if ascending: out -= np.repeat(out[run], rep) else: out = np.repeat(out[np.r_[run[1:], True]], rep) - out if self.grouper.has_dropped_na: out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False)) else: out = out.astype(np.int64, copy=False) rev = np.empty(count, dtype=np.intp) rev[sorter] = np.arange(count, dtype=np.intp) return out[rev] # ----------------------------------------------------------------- def _obj_1d_constructor(self) -> Callable: # GH28330 preserve subclassed Series/DataFrames if isinstance(self.obj, DataFrame): return self.obj._constructor_sliced assert isinstance(self.obj, Series) return self.obj._constructor def _bool_agg(self, val_test: Literal["any", "all"], skipna: bool): """ Shared func to call any / all Cython GroupBy implementations. """ def objs_to_bool(vals: ArrayLike) -> tuple[np.ndarray, type]: if is_object_dtype(vals.dtype) and skipna: # GH#37501: don't raise on pd.NA when skipna=True mask = isna(vals) if mask.any(): # mask on original values computed separately vals = vals.copy() vals[mask] = True elif isinstance(vals, BaseMaskedArray): vals = vals._data vals = vals.astype(bool, copy=False) return vals.view(np.int8), bool def result_to_bool( result: np.ndarray, inference: type, nullable: bool = False, ) -> ArrayLike: if nullable: return BooleanArray(result.astype(bool, copy=False), result == -1) else: return result.astype(inference, copy=False) return self._get_cythonized_result( libgroupby.group_any_all, numeric_only=False, cython_dtype=np.dtype(np.int8), pre_processing=objs_to_bool, post_processing=result_to_bool, val_test=val_test, skipna=skipna, ) def any(self, skipna: bool = True): """ Return True if any value in the group is truthful, else False. Parameters ---------- skipna : bool, default True Flag to ignore nan values during truth testing. Returns ------- Series or DataFrame DataFrame or Series of boolean values, where a value is True if any element is True within its respective group, False otherwise. """ return self._bool_agg("any", skipna) def all(self, skipna: bool = True): """ Return True if all values in the group are truthful, else False. Parameters ---------- skipna : bool, default True Flag to ignore nan values during truth testing. Returns ------- Series or DataFrame DataFrame or Series of boolean values, where a value is True if all elements are True within its respective group, False otherwise. """ return self._bool_agg("all", skipna) def count(self) -> NDFrameT: """ Compute count of group, excluding missing values. Returns ------- Series or DataFrame Count of values within each group. 
""" data = self._get_data_to_aggregate() ids, _, ngroups = self.grouper.group_info mask = ids != -1 is_series = data.ndim == 1 def hfunc(bvalues: ArrayLike) -> ArrayLike: # TODO(EA2D): reshape would not be necessary with 2D EAs if bvalues.ndim == 1: # EA masked = mask & ~isna(bvalues).reshape(1, -1) else: masked = mask & ~isna(bvalues) counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups) if is_series: assert counted.ndim == 2 assert counted.shape[0] == 1 return counted[0] return counted new_mgr = data.grouped_reduce(hfunc) new_obj = self._wrap_agged_manager(new_mgr) # If we are grouping on categoricals we want unobserved categories to # return zero, rather than the default of NaN which the reindexing in # _wrap_aggregated_output() returns. GH 35028 # e.g. test_dataframe_groupby_on_2_categoricals_when_observed_is_false with com.temp_setattr(self, "observed", True): result = self._wrap_aggregated_output(new_obj) return self._reindex_output(result, fill_value=0) def mean( self, numeric_only: bool = False, engine: str = "cython", engine_kwargs: dict[str, bool] | None = None, ): """ Compute mean of groups, excluding missing values. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None`` and defaults to ``False``. engine : str, default None * ``'cython'`` : Runs the operation through C-extensions from cython. * ``'numba'`` : Runs the operation through JIT compiled code from numba. * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` .. versionadded:: 1.4.0 engine_kwargs : dict, default None * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` .. versionadded:: 1.4.0 Returns ------- pandas.Series or pandas.DataFrame %(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5], ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C']) Groupby one column and return the mean of the remaining columns in each group. >>> df.groupby('A').mean() B C A 1 3.0 1.333333 2 4.0 1.500000 Groupby two columns and return the mean of the remaining column. >>> df.groupby(['A', 'B']).mean() C A B 1 2.0 2.0 4.0 1.0 2 3.0 1.0 5.0 2.0 Groupby one column and return the mean of only particular column in the group. >>> df.groupby('A')['B'].mean() A 1 3.0 2 4.0 Name: B, dtype: float64 """ if maybe_use_numba(engine): from pandas.core._numba.kernels import sliding_mean return self._numba_agg_general(sliding_mean, engine_kwargs) else: result = self._cython_agg_general( "mean", alt=lambda x: Series(x).mean(numeric_only=numeric_only), numeric_only=numeric_only, ) return result.__finalize__(self.obj, method="groupby") def median(self, numeric_only: bool = False): """ Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None`` and defaults to False. Returns ------- Series or DataFrame Median of values within each group. 
""" result = self._cython_agg_general( "median", alt=lambda x: Series(x).median(numeric_only=numeric_only), numeric_only=numeric_only, ) return result.__finalize__(self.obj, method="groupby") def std( self, ddof: int = 1, engine: str | None = None, engine_kwargs: dict[str, bool] | None = None, numeric_only: bool = False, ): """ Compute standard deviation of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : int, default 1 Degrees of freedom. engine : str, default None * ``'cython'`` : Runs the operation through C-extensions from cython. * ``'numba'`` : Runs the operation through JIT compiled code from numba. * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` .. versionadded:: 1.4.0 engine_kwargs : dict, default None * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` .. versionadded:: 1.4.0 numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionadded:: 1.5.0 .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. Returns ------- Series or DataFrame Standard deviation of values within each group. """ if maybe_use_numba(engine): from pandas.core._numba.kernels import sliding_var return np.sqrt(self._numba_agg_general(sliding_var, engine_kwargs, ddof)) else: def _preprocessing(values): if isinstance(values, BaseMaskedArray): return values._data, None return values, None def _postprocessing( vals, inference, nullable: bool = False, result_mask=None ) -> ArrayLike: if nullable: if result_mask.ndim == 2: result_mask = result_mask[:, 0] return FloatingArray(np.sqrt(vals), result_mask.view(np.bool_)) return np.sqrt(vals) result = self._get_cythonized_result( libgroupby.group_var, cython_dtype=np.dtype(np.float64), numeric_only=numeric_only, needs_counts=True, pre_processing=_preprocessing, post_processing=_postprocessing, ddof=ddof, how="std", ) return result def var( self, ddof: int = 1, engine: str | None = None, engine_kwargs: dict[str, bool] | None = None, numeric_only: bool = False, ): """ Compute variance of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : int, default 1 Degrees of freedom. engine : str, default None * ``'cython'`` : Runs the operation through C-extensions from cython. * ``'numba'`` : Runs the operation through JIT compiled code from numba. * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` .. versionadded:: 1.4.0 engine_kwargs : dict, default None * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` .. versionadded:: 1.4.0 numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionadded:: 1.5.0 .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. Returns ------- Series or DataFrame Variance of values within each group. 
""" if maybe_use_numba(engine): from pandas.core._numba.kernels import sliding_var return self._numba_agg_general(sliding_var, engine_kwargs, ddof) else: return self._cython_agg_general( "var", alt=lambda x: Series(x).var(ddof=ddof), numeric_only=numeric_only, ddof=ddof, ) def _value_counts( self, subset: Sequence[Hashable] | None = None, normalize: bool = False, sort: bool = True, ascending: bool = False, dropna: bool = True, ) -> DataFrame | Series: """ Shared implementation of value_counts for SeriesGroupBy and DataFrameGroupBy. SeriesGroupBy additionally supports a bins argument. See the docstring of DataFrameGroupBy.value_counts for a description of arguments. """ if self.axis == 1: raise NotImplementedError( "DataFrameGroupBy.value_counts only handles axis=0" ) name = "proportion" if normalize else "count" df = self.obj obj = self._obj_with_exclusions in_axis_names = { grouping.name for grouping in self.grouper.groupings if grouping.in_axis } if isinstance(obj, Series): _name = obj.name keys = [] if _name in in_axis_names else [obj] else: unique_cols = set(obj.columns) if subset is not None: subsetted = set(subset) clashing = subsetted & set(in_axis_names) if clashing: raise ValueError( f"Keys {clashing} in subset cannot be in " "the groupby column keys." ) doesnt_exist = subsetted - unique_cols if doesnt_exist: raise ValueError( f"Keys {doesnt_exist} in subset do not " f"exist in the DataFrame." ) else: subsetted = unique_cols keys = [ # Can't use .values because the column label needs to be preserved obj.iloc[:, idx] for idx, _name in enumerate(obj.columns) if _name not in in_axis_names and _name in subsetted ] groupings = list(self.grouper.groupings) for key in keys: grouper, _, _ = get_grouper( df, key=key, axis=self.axis, sort=self.sort, observed=False, dropna=dropna, ) groupings += list(grouper.groupings) # Take the size of the overall columns gb = df.groupby( groupings, sort=self.sort, observed=self.observed, dropna=self.dropna, ) result_series = cast(Series, gb.size()) result_series.name = name # GH-46357 Include non-observed categories # of non-grouping columns regardless of `observed` if any( isinstance(grouping.grouping_vector, (Categorical, CategoricalIndex)) and not grouping._observed for grouping in groupings ): levels_list = [ping.result_index for ping in groupings] multi_index, _ = MultiIndex.from_product( levels_list, names=[ping.name for ping in groupings] ).sortlevel() result_series = result_series.reindex(multi_index, fill_value=0) if normalize: # Normalize the results by dividing by the original group sizes. # We are guaranteed to have the first N levels be the # user-requested grouping. 
levels = list( range(len(self.grouper.groupings), result_series.index.nlevels) ) indexed_group_size = result_series.groupby( result_series.index.droplevel(levels), sort=self.sort, dropna=self.dropna, ).transform("sum") result_series /= indexed_group_size # Handle groups of non-observed categories result_series = result_series.fillna(0.0) if sort: # Sort the values and then resort by the main grouping index_level = range(len(self.grouper.groupings)) result_series = result_series.sort_values(ascending=ascending).sort_index( level=index_level, sort_remaining=False ) result: Series | DataFrame if self.as_index: result = result_series else: # Convert to frame index = result_series.index columns = com.fill_missing_names(index.names) if name in columns: raise ValueError(f"Column label '{name}' is duplicate of result column") result_series.name = name result_series.index = index.set_names(range(len(columns))) result_frame = result_series.reset_index() result_frame.columns = columns + [name] result = result_frame return result.__finalize__(self.obj, method="value_counts") def sem(self, ddof: int = 1, numeric_only: bool = False): """ Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : int, default 1 Degrees of freedom. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionadded:: 1.5.0 .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. Returns ------- Series or DataFrame Standard error of the mean of values within each group. """ if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype): raise TypeError( f"{type(self).__name__}.sem called with " f"numeric_only={numeric_only} and dtype {self.obj.dtype}" ) result = self.std(ddof=ddof, numeric_only=numeric_only) if result.ndim == 1: result /= np.sqrt(self.count()) else: cols = result.columns.difference(self.exclusions).unique() counts = self.count() result_ilocs = result.columns.get_indexer_for(cols) count_ilocs = counts.columns.get_indexer_for(cols) result.iloc[:, result_ilocs] /= np.sqrt(counts.iloc[:, count_ilocs]) return result def size(self) -> DataFrame | Series: """ Compute group sizes. Returns ------- DataFrame or Series Number of rows in each group as a Series if as_index is True or a DataFrame if as_index is False. """ result = self.grouper.size() # GH28330 preserve subclassed Series/DataFrames through calls if isinstance(self.obj, Series): result = self._obj_1d_constructor(result, name=self.obj.name) else: result = self._obj_1d_constructor(result) with com.temp_setattr(self, "as_index", True): # size already has the desired behavior in GH#49519, but this makes the # as_index=False path of _reindex_output fail on categorical groupers. result = self._reindex_output(result, fill_value=0) if not self.as_index: # error: Incompatible types in assignment (expression has # type "DataFrame", variable has type "Series") result = result.rename("size").reset_index() # type: ignore[assignment] return result def sum( self, numeric_only: bool = False, min_count: int = 0, engine: str | None = None, engine_kwargs: dict[str, bool] | None = None, ): if maybe_use_numba(engine): from pandas.core._numba.kernels import sliding_sum return self._numba_agg_general( sliding_sum, engine_kwargs, ) else: # If we are grouping on categoricals we want unobserved categories to # return zero, rather than the default of NaN which the reindexing in # _agg_general() returns. 
GH #31422 with com.temp_setattr(self, "observed", True): result = self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="sum", npfunc=np.sum, ) return self._reindex_output(result, fill_value=0) def prod(self, numeric_only: bool = False, min_count: int = 0): return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod ) def min( self, numeric_only: bool = False, min_count: int = -1, engine: str | None = None, engine_kwargs: dict[str, bool] | None = None, ): if maybe_use_numba(engine): from pandas.core._numba.kernels import sliding_min_max return self._numba_agg_general(sliding_min_max, engine_kwargs, False) else: return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="min", npfunc=np.min, ) def max( self, numeric_only: bool = False, min_count: int = -1, engine: str | None = None, engine_kwargs: dict[str, bool] | None = None, ): if maybe_use_numba(engine): from pandas.core._numba.kernels import sliding_min_max return self._numba_agg_general(sliding_min_max, engine_kwargs, True) else: return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="max", npfunc=np.max, ) def first(self, numeric_only: bool = False, min_count: int = -1): """ Compute the first non-null entry of each column. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. min_count : int, default -1 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. Returns ------- Series or DataFrame First non-null of values within each group. See Also -------- DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. pandas.core.groupby.DataFrameGroupBy.last : Compute the last non-null entry of each column. pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. Examples -------- >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[None, 5, 6], C=[1, 2, 3], ... D=['3/11/2000', '3/12/2000', '3/13/2000'])) >>> df['D'] = pd.to_datetime(df['D']) >>> df.groupby("A").first() B C D A 1 5.0 1 2000-03-11 3 6.0 3 2000-03-13 >>> df.groupby("A").first(min_count=2) B C D A 1 NaN 1.0 2000-03-11 3 NaN NaN NaT >>> df.groupby("A").first(numeric_only=True) B C A 1 5.0 1 3 6.0 3 """ def first_compat(obj: NDFrameT, axis: AxisInt = 0): def first(x: Series): """Helper function for first item that isn't NA.""" arr = x.array[notna(x.array)] if not len(arr): return np.nan return arr[0] if isinstance(obj, DataFrame): return obj.apply(first, axis=axis) elif isinstance(obj, Series): return first(obj) else: # pragma: no cover raise TypeError(type(obj)) return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="first", npfunc=first_compat, ) def last(self, numeric_only: bool = False, min_count: int = -1): """ Compute the last non-null entry of each column. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. min_count : int, default -1 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. Returns ------- Series or DataFrame Last non-null of values within each group. See Also -------- DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. pandas.core.groupby.DataFrameGroupBy.first : Compute the first non-null entry of each column. 
pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. Examples -------- >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3])) >>> df.groupby("A").last() B C A 1 5.0 2 3 6.0 3 """ def last_compat(obj: NDFrameT, axis: AxisInt = 0): def last(x: Series): """Helper function for last item that isn't NA.""" arr = x.array[notna(x.array)] if not len(arr): return np.nan return arr[-1] if isinstance(obj, DataFrame): return obj.apply(last, axis=axis) elif isinstance(obj, Series): return last(obj) else: # pragma: no cover raise TypeError(type(obj)) return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="last", npfunc=last_compat, ) def ohlc(self) -> DataFrame: """ Compute open, high, low and close values of a group, excluding missing values. For multiple groupings, the result index will be a MultiIndex Returns ------- DataFrame Open, high, low and close values within each group. """ if self.obj.ndim == 1: # self._iterate_slices() yields only self._selected_obj obj = self._selected_obj is_numeric = is_numeric_dtype(obj.dtype) if not is_numeric: raise DataError("No numeric types to aggregate") res_values = self.grouper._cython_operation( "aggregate", obj._values, "ohlc", axis=0, min_count=-1 ) agg_names = ["open", "high", "low", "close"] result = self.obj._constructor_expanddim( res_values, index=self.grouper.result_index, columns=agg_names ) return self._reindex_output(result) result = self._apply_to_column_groupbys( lambda x: x.ohlc(), self._obj_with_exclusions ) if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result def describe( self, percentiles=None, include=None, exclude=None, ) -> NDFrameT: obj = self._obj_with_exclusions if len(obj) == 0: described = obj.describe( percentiles=percentiles, include=include, exclude=exclude ) if obj.ndim == 1: result = described else: result = described.unstack() return result.to_frame().T.iloc[:0] with com.temp_setattr(self, "as_index", True): result = self._python_apply_general( lambda x: x.describe( percentiles=percentiles, include=include, exclude=exclude ), obj, not_indexed_same=True, ) if self.axis == 1: return result.T # GH#49256 - properly handle the grouping column(s) result = result.unstack() if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result def resample(self, rule, *args, **kwargs): """ Provide resampling when using a TimeGrouper. Given a grouper, the function resamples it according to a string "string" -> "frequency". See the :ref:`frequency aliases <timeseries.offset_aliases>` documentation for more details. Parameters ---------- rule : str or DateOffset The offset string or object representing target grouper conversion. *args, **kwargs Possible arguments are `how`, `fill_method`, `limit`, `kind` and `on`, and other arguments of `TimeGrouper`. Returns ------- Grouper Return a new grouper with our resampler appended. See Also -------- Grouper : Specify a frequency to resample with when grouping by a key. DatetimeIndex.resample : Frequency conversion and resampling of time series. Examples -------- >>> idx = pd.date_range('1/1/2000', periods=4, freq='T') >>> df = pd.DataFrame(data=4 * [range(2)], ... index=idx, ... 
columns=['a', 'b']) >>> df.iloc[2, 0] = 5 >>> df a b 2000-01-01 00:00:00 0 1 2000-01-01 00:01:00 0 1 2000-01-01 00:02:00 5 1 2000-01-01 00:03:00 0 1 Downsample the DataFrame into 3 minute bins and sum the values of the timestamps falling into a bin. >>> df.groupby('a').resample('3T').sum() a b a 0 2000-01-01 00:00:00 0 2 2000-01-01 00:03:00 0 1 5 2000-01-01 00:00:00 5 1 Upsample the series into 30 second bins. >>> df.groupby('a').resample('30S').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:00:30 0 0 2000-01-01 00:01:00 0 1 2000-01-01 00:01:30 0 0 2000-01-01 00:02:00 0 0 2000-01-01 00:02:30 0 0 2000-01-01 00:03:00 0 1 5 2000-01-01 00:02:00 5 1 Resample by month. Values are assigned to the month of the period. >>> df.groupby('a').resample('M').sum() a b a 0 2000-01-31 0 3 5 2000-01-31 5 1 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> df.groupby('a').resample('3T', closed='right').sum() a b a 0 1999-12-31 23:57:00 0 1 2000-01-01 00:00:00 0 2 5 2000-01-01 00:00:00 5 1 Downsample the series into 3 minute bins and close the right side of the bin interval, but label each bin using the right edge instead of the left. >>> df.groupby('a').resample('3T', closed='right', label='right').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:03:00 0 2 5 2000-01-01 00:03:00 5 1 """ from pandas.core.resample import get_resampler_for_grouping return get_resampler_for_grouping(self, rule, *args, **kwargs) def rolling(self, *args, **kwargs) -> RollingGroupby: """ Return a rolling grouper, providing rolling functionality per group. Parameters ---------- window : int, timedelta, str, offset, or BaseIndexer subclass Size of the moving window. If an integer, the fixed number of observations used for each window. If a timedelta, str, or offset, the time period of each window. Each window will be a variable sized based on the observations included in the time-period. This is only valid for datetimelike indexes. To learn more about the offsets & frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. If a BaseIndexer subclass, the window boundaries based on the defined ``get_window_bounds`` method. Additional rolling keyword arguments, namely ``min_periods``, ``center``, ``closed`` and ``step`` will be passed to ``get_window_bounds``. min_periods : int, default None Minimum number of observations in window required to have a value; otherwise, result is ``np.nan``. For a window that is specified by an offset, ``min_periods`` will default to 1. For a window that is specified by an integer, ``min_periods`` will default to the size of the window. center : bool, default False If False, set the window labels as the right edge of the window index. If True, set the window labels as the center of the window index. win_type : str, default None If ``None``, all points are evenly weighted. If a string, it must be a valid `scipy.signal window function <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__. Certain Scipy window types require additional parameters to be passed in the aggregation function. The additional parameters must match the keywords specified in the Scipy window type method signature. on : str, optional For a DataFrame, a column label or Index level on which to calculate the rolling window, rather than the DataFrame's index. 
Provided integer column is ignored and excluded from result since an integer index is not used to calculate the rolling window. axis : int or str, default 0 If ``0`` or ``'index'``, roll across the rows. If ``1`` or ``'columns'``, roll across the columns. For `Series` this parameter is unused and defaults to 0. closed : str, default None If ``'right'``, the first point in the window is excluded from calculations. If ``'left'``, the last point in the window is excluded from calculations. If ``'both'``, the no points in the window are excluded from calculations. If ``'neither'``, the first and last points in the window are excluded from calculations. Default ``None`` (``'right'``). method : str {'single', 'table'}, default 'single' Execute the rolling operation per single column or row (``'single'``) or over the entire object (``'table'``). This argument is only implemented when specifying ``engine='numba'`` in the method call. Returns ------- RollingGroupby Return a new grouper with our rolling appended. See Also -------- Series.rolling : Calling object with Series data. DataFrame.rolling : Calling object with DataFrames. Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby. Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 2], ... 'B': [1, 2, 3, 4], ... 'C': [0.362, 0.227, 1.267, -0.562]}) >>> df A B C 0 1 1 0.362 1 1 2 0.227 2 2 3 1.267 3 2 4 -0.562 >>> df.groupby('A').rolling(2).sum() B C A 1 0 NaN NaN 1 3.0 0.589 2 2 NaN NaN 3 7.0 0.705 >>> df.groupby('A').rolling(2, min_periods=1).sum() B C A 1 0 1.0 0.362 1 3.0 0.589 2 2 3.0 1.267 3 7.0 0.705 >>> df.groupby('A').rolling(2, on='B').sum() B C A 1 0 1 NaN 1 2 0.589 2 2 3 NaN 3 4 0.705 """ from pandas.core.window import RollingGroupby return RollingGroupby( self._selected_obj, *args, _grouper=self.grouper, _as_index=self.as_index, **kwargs, ) def expanding(self, *args, **kwargs) -> ExpandingGroupby: """ Return an expanding grouper, providing expanding functionality per group. """ from pandas.core.window import ExpandingGroupby return ExpandingGroupby( self._selected_obj, *args, _grouper=self.grouper, **kwargs, ) def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby: """ Return an ewm grouper, providing ewm functionality per group. """ from pandas.core.window import ExponentialMovingWindowGroupby return ExponentialMovingWindowGroupby( self._selected_obj, *args, _grouper=self.grouper, **kwargs, ) def _fill(self, direction: Literal["ffill", "bfill"], limit=None): """ Shared function for `pad` and `backfill` to call Cython method. Parameters ---------- direction : {'ffill', 'bfill'} Direction passed to underlying Cython function. `bfill` will cause values to be filled backwards. `ffill` and any other values will default to a forward fill limit : int, default None Maximum number of consecutive values to fill. If `None`, this method will convert to -1 prior to passing to Cython Returns ------- `Series` or `DataFrame` with filled values See Also -------- pad : Returns Series with minimum number of char in object. backfill : Backward fill the missing values in the dataset. 
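        Notes
        -----
        A minimal illustration (hypothetical frame with a grouping column
        ``'g'`` and a value column ``'v'``): ``df.groupby('g')['v'].ffill(limit=1)``
        dispatches here with ``direction='ffill'`` and fills at most one
        consecutive missing value within each group.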
""" # Need int value for Cython if limit is None: limit = -1 ids, _, _ = self.grouper.group_info sorted_labels = np.argsort(ids, kind="mergesort").astype(np.intp, copy=False) if direction == "bfill": sorted_labels = sorted_labels[::-1] col_func = partial( libgroupby.group_fillna_indexer, labels=ids, sorted_labels=sorted_labels, direction=direction, limit=limit, dropna=self.dropna, ) def blk_func(values: ArrayLike) -> ArrayLike: mask = isna(values) if values.ndim == 1: indexer = np.empty(values.shape, dtype=np.intp) col_func(out=indexer, mask=mask) return algorithms.take_nd(values, indexer) else: # We broadcast algorithms.take_nd analogous to # np.take_along_axis # Note: we only get here with backfill/pad, # so if we have a dtype that cannot hold NAs, # then there will be no -1s in indexer, so we can use # the original dtype (no need to ensure_dtype_can_hold_na) if isinstance(values, np.ndarray): dtype = values.dtype if self.grouper.has_dropped_na: # dropped null groups give rise to nan in the result dtype = ensure_dtype_can_hold_na(values.dtype) out = np.empty(values.shape, dtype=dtype) else: out = type(values)._empty(values.shape, dtype=values.dtype) for i, value_element in enumerate(values): # call group_fillna_indexer column-wise indexer = np.empty(values.shape[1], dtype=np.intp) col_func(out=indexer, mask=mask[i]) out[i, :] = algorithms.take_nd(value_element, indexer) return out mgr = self._get_data_to_aggregate() res_mgr = mgr.apply(blk_func) new_obj = self._wrap_agged_manager(res_mgr) if self.axis == 1: # Only relevant for DataFrameGroupBy new_obj = new_obj.T new_obj.columns = self.obj.columns new_obj.index = self.obj.index return new_obj def ffill(self, limit=None): """ Forward fill the values. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series or DataFrame Object with missing values filled. See Also -------- Series.ffill: Returns Series with minimum number of char in object. DataFrame.ffill: Object with missing values filled or None if inplace=True. Series.fillna: Fill NaN values of a Series. DataFrame.fillna: Fill NaN values of a DataFrame. """ return self._fill("ffill", limit=limit) def bfill(self, limit=None): """ Backward fill the values. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series or DataFrame Object with missing values filled. See Also -------- Series.bfill : Backward fill the missing values in the dataset. DataFrame.bfill: Backward fill the missing values in the dataset. Series.fillna: Fill NaN values of a Series. DataFrame.fillna: Fill NaN values of a DataFrame. """ return self._fill("bfill", limit=limit) def nth(self) -> GroupByNthSelector: """ Take the nth row from each group if n is an int, otherwise a subset of rows. Can be either a call or an index. dropna is not available with index notation. Index notation accepts a comma separated list of integers and slices. If dropna, will take the nth non-null row, dropna is either 'all' or 'any'; this is equivalent to calling dropna(how=dropna) before the groupby. Parameters ---------- n : int, slice or list of ints and slices A single nth value for the row or a list of nth values or slices. .. versionchanged:: 1.4.0 Added slice and lists containing slices. Added index notation. dropna : {'any', 'all', None}, default None Apply the specified dropna operation before counting which row is the nth row. Only supported if n is an int. Returns ------- Series or DataFrame N-th value within each group. 
%(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B']) >>> g = df.groupby('A') >>> g.nth(0) A B 0 1 NaN 2 2 3.0 >>> g.nth(1) A B 1 1 2.0 4 2 5.0 >>> g.nth(-1) A B 3 1 4.0 4 2 5.0 >>> g.nth([0, 1]) A B 0 1 NaN 1 1 2.0 2 2 3.0 4 2 5.0 >>> g.nth(slice(None, -1)) A B 0 1 NaN 1 1 2.0 2 2 3.0 Index notation may also be used >>> g.nth[0, 1] A B 0 1 NaN 1 1 2.0 2 2 3.0 4 2 5.0 >>> g.nth[:-1] A B 0 1 NaN 1 1 2.0 2 2 3.0 Specifying `dropna` allows ignoring ``NaN`` values >>> g.nth(0, dropna='any') A B 1 1 2.0 2 2 3.0 When the specified ``n`` is larger than any of the groups, an empty DataFrame is returned >>> g.nth(3, dropna='any') Empty DataFrame Columns: [A, B] Index: [] """ return GroupByNthSelector(self) def _nth( self, n: PositionalIndexer | tuple, dropna: Literal["any", "all", None] = None, ) -> NDFrameT: if not dropna: mask = self._make_mask_from_positional_indexer(n) ids, _, _ = self.grouper.group_info # Drop NA values in grouping mask = mask & (ids != -1) out = self._mask_selected_obj(mask) return out # dropna is truthy if not is_integer(n): raise ValueError("dropna option only supported for an integer argument") if dropna not in ["any", "all"]: # Note: when agg-ing picker doesn't raise this, just returns NaN raise ValueError( "For a DataFrame or Series groupby.nth, dropna must be " "either None, 'any' or 'all', " f"(was passed {dropna})." ) # old behaviour, but with all and any support for DataFrames. # modified in GH 7559 to have better perf n = cast(int, n) dropped = self.obj.dropna(how=dropna, axis=self.axis) # get a new grouper for our dropped obj if self.keys is None and self.level is None: # we don't have the grouper info available # (e.g. we have selected out # a column that is not in the current object) axis = self.grouper.axis grouper = self.grouper.codes_info[axis.isin(dropped.index)] if self.grouper.has_dropped_na: # Null groups need to still be encoded as -1 when passed to groupby nulls = grouper == -1 # error: No overload variant of "where" matches argument types # "Any", "NAType", "Any" values = np.where(nulls, NA, grouper) # type: ignore[call-overload] grouper = Index(values, dtype="Int64") else: # create a grouper with the original parameters, but on dropped # object grouper, _, _ = get_grouper( dropped, key=self.keys, axis=self.axis, level=self.level, sort=self.sort, ) grb = dropped.groupby( grouper, as_index=self.as_index, sort=self.sort, axis=self.axis ) return grb.nth(n) def quantile( self, q: float | AnyArrayLike = 0.5, interpolation: str = "linear", numeric_only: bool = False, ): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionadded:: 1.5.0 .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... 
], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """ def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]: if is_object_dtype(vals): raise TypeError( "'quantile' cannot be performed against 'object' dtypes!" ) inference: DtypeObj | None = None if isinstance(vals, BaseMaskedArray) and is_numeric_dtype(vals.dtype): out = vals.to_numpy(dtype=float, na_value=np.nan) inference = vals.dtype elif is_integer_dtype(vals.dtype): if isinstance(vals, ExtensionArray): out = vals.to_numpy(dtype=float, na_value=np.nan) else: out = vals inference = np.dtype(np.int64) elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray): out = vals.to_numpy(dtype=float, na_value=np.nan) elif needs_i8_conversion(vals.dtype): inference = vals.dtype # In this case we need to delay the casting until after the # np.lexsort below. # error: Incompatible return value type (got # "Tuple[Union[ExtensionArray, ndarray[Any, Any]], Union[Any, # ExtensionDtype]]", expected "Tuple[ndarray[Any, Any], # Optional[Union[dtype[Any], ExtensionDtype]]]") return vals, inference # type: ignore[return-value] elif isinstance(vals, ExtensionArray) and is_float_dtype(vals): inference = np.dtype(np.float64) out = vals.to_numpy(dtype=float, na_value=np.nan) else: out = np.asarray(vals) return out, inference def post_processor( vals: np.ndarray, inference: DtypeObj | None, result_mask: np.ndarray | None, orig_vals: ArrayLike, ) -> ArrayLike: if inference: # Check for edge case if isinstance(orig_vals, BaseMaskedArray): assert result_mask is not None # for mypy if interpolation in {"linear", "midpoint"} and not is_float_dtype( orig_vals ): return FloatingArray(vals, result_mask) else: # Item "ExtensionDtype" of "Union[ExtensionDtype, str, # dtype[Any], Type[object]]" has no attribute "numpy_dtype" # [union-attr] return type(orig_vals)( vals.astype( inference.numpy_dtype # type: ignore[union-attr] ), result_mask, ) elif not ( is_integer_dtype(inference) and interpolation in {"linear", "midpoint"} ): if needs_i8_conversion(inference): # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" vals = vals.astype("i8").view( orig_vals._ndarray.dtype # type: ignore[union-attr] ) # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_from_backing_data" return orig_vals._from_backing_data( # type: ignore[union-attr] vals ) assert isinstance(inference, np.dtype) # for mypy return vals.astype(inference) return vals orig_scalar = is_scalar(q) if orig_scalar: # error: Incompatible types in assignment (expression has type "List[ # Union[float, ExtensionArray, ndarray[Any, Any], Index, Series]]", # variable has type "Union[float, Union[Union[ExtensionArray, ndarray[ # Any, Any]], Index, Series]]") q = [q] # type: ignore[assignment] qs = np.array(q, dtype=np.float64) ids, _, ngroups = self.grouper.group_info nqs = len(qs) func = partial( libgroupby.group_quantile, labels=ids, qs=qs, interpolation=interpolation ) # Put '-1' (NaN) labels as the last group so it does not interfere # with the calculations. Note: length check avoids failure on empty # labels. 
In that case, the value doesn't matter na_label_for_sorting = ids.max() + 1 if len(ids) > 0 else 0 labels_for_lexsort = np.where(ids == -1, na_label_for_sorting, ids) def blk_func(values: ArrayLike) -> ArrayLike: orig_vals = values if isinstance(values, BaseMaskedArray): mask = values._mask result_mask = np.zeros((ngroups, nqs), dtype=np.bool_) else: mask = isna(values) result_mask = None is_datetimelike = needs_i8_conversion(values.dtype) vals, inference = pre_processor(values) ncols = 1 if vals.ndim == 2: ncols = vals.shape[0] shaped_labels = np.broadcast_to( labels_for_lexsort, (ncols, len(labels_for_lexsort)) ) else: shaped_labels = labels_for_lexsort out = np.empty((ncols, ngroups, nqs), dtype=np.float64) # Get an index of values sorted by values and then labels order = (vals, shaped_labels) sort_arr = np.lexsort(order).astype(np.intp, copy=False) if is_datetimelike: # This casting needs to happen after the lexsort in order # to ensure that NaTs are placed at the end and not the front vals = vals.view("i8").astype(np.float64) if vals.ndim == 1: # Ea is always 1d func( out[0], values=vals, mask=mask, sort_indexer=sort_arr, result_mask=result_mask, ) else: for i in range(ncols): func(out[i], values=vals[i], mask=mask[i], sort_indexer=sort_arr[i]) if vals.ndim == 1: out = out.ravel("K") if result_mask is not None: result_mask = result_mask.ravel("K") else: out = out.reshape(ncols, ngroups * nqs) return post_processor(out, inference, result_mask, orig_vals) data = self._get_data_to_aggregate(numeric_only=numeric_only, name="quantile") res_mgr = data.grouped_reduce(blk_func) res = self._wrap_agged_manager(res_mgr) if orig_scalar: # Avoid expensive MultiIndex construction return self._wrap_aggregated_output(res) return self._wrap_aggregated_output(res, qs=qs) def ngroup(self, ascending: bool = True): """ Number each group from 0 to the number of groups - 1. This is the enumerative complement of cumcount. Note that the numbers given to the groups match the order in which the groups would be seen when iterating over the groupby object, not the order they are first observed. Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN` and will be skipped from the count. Parameters ---------- ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. Returns ------- Series Unique numbers for each group. See Also -------- .cumcount : Number the rows in each group. Examples -------- >>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]}) >>> df color 0 red 1 None 2 red 3 blue 4 blue 5 red >>> df.groupby("color").ngroup() 0 1.0 1 NaN 2 1.0 3 0.0 4 0.0 5 1.0 dtype: float64 >>> df.groupby("color", dropna=False).ngroup() 0 1 1 2 2 1 3 0 4 0 5 1 dtype: int64 >>> df.groupby("color", dropna=False).ngroup(ascending=False) 0 1 1 0 2 1 3 2 4 2 5 1 dtype: int64 """ obj = self._obj_with_exclusions index = obj._get_axis(self.axis) comp_ids = self.grouper.group_info[0] dtype: type if self.grouper.has_dropped_na: comp_ids = np.where(comp_ids == -1, np.nan, comp_ids) dtype = np.float64 else: dtype = np.int64 if any(ping._passed_categorical for ping in self.grouper.groupings): # comp_ids reflect non-observed groups, we need only observed comp_ids = rank_1d(comp_ids, ties_method="dense") - 1 result = self._obj_1d_constructor(comp_ids, index, dtype=dtype) if not ascending: result = self.ngroups - 1 - result return result def cumcount(self, ascending: bool = True): """ Number each item in each group from 0 to the length of that group - 1. 
Essentially this is equivalent to .. code-block:: python self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Returns ------- Series Sequence number of each element within each group. See Also -------- .ngroup : Number the groups themselves. Examples -------- >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], ... columns=['A']) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').cumcount() 0 0 1 1 2 2 3 0 4 1 5 3 dtype: int64 >>> df.groupby('A').cumcount(ascending=False) 0 3 1 2 2 1 3 1 4 0 5 0 dtype: int64 """ index = self._obj_with_exclusions._get_axis(self.axis) cumcounts = self._cumcount_array(ascending=ascending) return self._obj_1d_constructor(cumcounts, index) def rank( self, method: str = "average", ascending: bool = True, na_option: str = "keep", pct: bool = False, axis: AxisInt = 0, ) -> NDFrameT: """ Provide the rank of values within each group. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group. * min: lowest rank in group. * max: highest rank in group. * first: ranks assigned in order they appear in the array. * dense: like 'min', but rank always increases by 1 between groups. ascending : bool, default True False for ranks by high (1) to low (N). na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are. * top: smallest rank if ascending. * bottom: smallest rank if descending. pct : bool, default False Compute percentage rank of data within each group. axis : int, default 0 The axis of the object over which to compute the rank. Returns ------- DataFrame with ranking of values within each group %(see_also)s Examples -------- >>> df = pd.DataFrame( ... { ... "group": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], ... "value": [2, 4, 2, 3, 5, 1, 2, 4, 1, 5], ... } ... ) >>> df group value 0 a 2 1 a 4 2 a 2 3 a 3 4 a 5 5 b 1 6 b 2 7 b 4 8 b 1 9 b 5 >>> for method in ['average', 'min', 'max', 'dense', 'first']: ... df[f'{method}_rank'] = df.groupby('group')['value'].rank(method) >>> df group value average_rank min_rank max_rank dense_rank first_rank 0 a 2 1.5 1.0 2.0 1.0 1.0 1 a 4 4.0 4.0 4.0 3.0 4.0 2 a 2 1.5 1.0 2.0 1.0 2.0 3 a 3 3.0 3.0 3.0 2.0 3.0 4 a 5 5.0 5.0 5.0 4.0 5.0 5 b 1 1.5 1.0 2.0 1.0 1.0 6 b 2 3.0 3.0 3.0 2.0 3.0 7 b 4 4.0 4.0 4.0 3.0 4.0 8 b 1 1.5 1.0 2.0 1.0 2.0 9 b 5 5.0 5.0 5.0 4.0 5.0 """ if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) kwargs = { "ties_method": method, "ascending": ascending, "na_option": na_option, "pct": pct, } if axis != 0: # DataFrame uses different keyword name kwargs["method"] = kwargs.pop("ties_method") f = lambda x: x.rank(axis=axis, numeric_only=False, **kwargs) result = self._python_apply_general( f, self._selected_obj, is_transform=True ) return result return self._cython_transform( "rank", numeric_only=False, axis=axis, **kwargs, ) def cumprod(self, axis: Axis = 0, *args, **kwargs) -> NDFrameT: """ Cumulative product for each group. 
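        Each value is replaced by the running product of the values seen so far
        within its group, analogous to applying ``Series.cumprod`` group-wise.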
Returns ------- Series or DataFrame """ nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"]) if axis != 0: f = lambda x: x.cumprod(axis=axis, **kwargs) return self._python_apply_general(f, self._selected_obj, is_transform=True) return self._cython_transform("cumprod", **kwargs) def cumsum(self, axis: Axis = 0, *args, **kwargs) -> NDFrameT: """ Cumulative sum for each group. Returns ------- Series or DataFrame """ nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"]) if axis != 0: f = lambda x: x.cumsum(axis=axis, **kwargs) return self._python_apply_general(f, self._selected_obj, is_transform=True) return self._cython_transform("cumsum", **kwargs) def cummin( self, axis: AxisInt = 0, numeric_only: bool = False, **kwargs ) -> NDFrameT: """ Cumulative min for each group. Returns ------- Series or DataFrame """ skipna = kwargs.get("skipna", True) if axis != 0: f = lambda x: np.minimum.accumulate(x, axis) obj = self._selected_obj if numeric_only: obj = obj._get_numeric_data() return self._python_apply_general(f, obj, is_transform=True) return self._cython_transform( "cummin", numeric_only=numeric_only, skipna=skipna ) def cummax( self, axis: AxisInt = 0, numeric_only: bool = False, **kwargs ) -> NDFrameT: """ Cumulative max for each group. Returns ------- Series or DataFrame """ skipna = kwargs.get("skipna", True) if axis != 0: f = lambda x: np.maximum.accumulate(x, axis) obj = self._selected_obj if numeric_only: obj = obj._get_numeric_data() return self._python_apply_general(f, obj, is_transform=True) return self._cython_transform( "cummax", numeric_only=numeric_only, skipna=skipna ) def _get_cythonized_result( self, base_func: Callable, cython_dtype: np.dtype, numeric_only: bool = False, needs_counts: bool = False, pre_processing=None, post_processing=None, how: str = "any_all", **kwargs, ): """ Get result for Cythonized functions. Parameters ---------- base_func : callable, Cythonized function to be called cython_dtype : np.dtype Type of the array that will be modified by the Cython call. numeric_only : bool, default False Whether only numeric datatypes should be computed needs_counts : bool, default False Whether the counts should be a part of the Cython call pre_processing : function, default None Function to be applied to `values` prior to passing to Cython. Function should return a tuple where the first element is the values to be passed to Cython and the second element is an optional type which the values should be converted to after being returned by the Cython operation. This function is also responsible for raising a TypeError if the values have an invalid type. Raises if `needs_values` is False. post_processing : function, default None Function to be applied to result of Cython function. Should accept an array of values as the first argument and type inferences as its second argument, i.e. the signature should be (ndarray, Type). If `needs_nullable=True`, a third argument should be `nullable`, to allow for processing specific to nullable values. how : str, default any_all Determines if any/all cython interface or std interface is used. 
**kwargs : dict Extra arguments to be passed back to Cython funcs Returns ------- `Series` or `DataFrame` with filled values """ if post_processing and not callable(post_processing): raise ValueError("'post_processing' must be a callable!") if pre_processing and not callable(pre_processing): raise ValueError("'pre_processing' must be a callable!") grouper = self.grouper ids, _, ngroups = grouper.group_info base_func = partial(base_func, labels=ids) def blk_func(values: ArrayLike) -> ArrayLike: values = values.T ncols = 1 if values.ndim == 1 else values.shape[1] result: ArrayLike result = np.zeros(ngroups * ncols, dtype=cython_dtype) result = result.reshape((ngroups, ncols)) func = partial(base_func, out=result) inferences = None if needs_counts: counts = np.zeros(ngroups, dtype=np.int64) func = partial(func, counts=counts) is_datetimelike = values.dtype.kind in ["m", "M"] vals = values if is_datetimelike and how == "std": vals = vals.view("i8") if pre_processing: vals, inferences = pre_processing(vals) vals = vals.astype(cython_dtype, copy=False) if vals.ndim == 1: vals = vals.reshape((-1, 1)) func = partial(func, values=vals) if how != "std" or isinstance(values, BaseMaskedArray): mask = isna(values).view(np.uint8) if mask.ndim == 1: mask = mask.reshape(-1, 1) func = partial(func, mask=mask) if how != "std": is_nullable = isinstance(values, BaseMaskedArray) func = partial(func, nullable=is_nullable) elif isinstance(values, BaseMaskedArray): result_mask = np.zeros(result.shape, dtype=np.bool_) func = partial(func, result_mask=result_mask) # Call func to modify result in place if how == "std": func(**kwargs, is_datetimelike=is_datetimelike) else: func(**kwargs) if values.ndim == 1: assert result.shape[1] == 1, result.shape result = result[:, 0] if post_processing: pp_kwargs: dict[str, bool | np.ndarray] = {} pp_kwargs["nullable"] = isinstance(values, BaseMaskedArray) if how == "std" and pp_kwargs["nullable"]: pp_kwargs["result_mask"] = result_mask result = post_processing(result, inferences, **pp_kwargs) if how == "std" and is_datetimelike: values = cast("DatetimeArray | TimedeltaArray", values) unit = values.unit with warnings.catch_warnings(): # suppress "RuntimeWarning: invalid value encountered in cast" warnings.filterwarnings("ignore") result = result.astype(np.int64, copy=False) result = result.view(f"m8[{unit}]") return result.T # Operate block-wise instead of column-by-column mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name=how) res_mgr = mgr.grouped_reduce(blk_func) out = self._wrap_agged_manager(res_mgr) return self._wrap_aggregated_output(out) def shift(self, periods: int = 1, freq=None, axis: Axis = 0, fill_value=None): """ Shift each group by periods observations. If freq is passed, the index will be increased using the periods and the freq. Parameters ---------- periods : int, default 1 Number of periods to shift. freq : str, optional Frequency string. axis : axis to shift, default 0 Shift direction. fill_value : optional The scalar value to use for newly introduced missing values. Returns ------- Series or DataFrame Object shifted within each group. See Also -------- Index.shift : Shift values of Index. 
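        Examples
        --------
        A short example with made-up data:

        >>> df = pd.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]})
        >>> df.groupby("g")["v"].shift(1)
        0    NaN
        1    1.0
        2    NaN
        Name: v, dtype: float64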
""" if freq is not None or axis != 0: f = lambda x: x.shift(periods, freq, axis, fill_value) return self._python_apply_general(f, self._selected_obj, is_transform=True) ids, _, ngroups = self.grouper.group_info res_indexer = np.zeros(len(ids), dtype=np.int64) libgroupby.group_shift_indexer(res_indexer, ids, ngroups, periods) obj = self._obj_with_exclusions res = obj._reindex_with_indexers( {self.axis: (obj.axes[self.axis], res_indexer)}, fill_value=fill_value, allow_dups=True, ) return res def diff(self, periods: int = 1, axis: AxisInt = 0) -> NDFrameT: """ First discrete difference of element. Calculates the difference of each element compared with another element in the group (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : axis to shift, default 0 Take difference over rows (0) or columns (1). Returns ------- Series or DataFrame First differences. """ if axis != 0: return self.apply(lambda x: x.diff(periods=periods, axis=axis)) obj = self._obj_with_exclusions shifted = self.shift(periods=periods, axis=axis) # GH45562 - to retain existing behavior and match behavior of Series.diff(), # int8 and int16 are coerced to float32 rather than float64. dtypes_to_f32 = ["int8", "int16"] if obj.ndim == 1: if obj.dtype in dtypes_to_f32: shifted = shifted.astype("float32") else: to_coerce = [c for c, dtype in obj.dtypes.items() if dtype in dtypes_to_f32] if len(to_coerce): shifted = shifted.astype({c: "float32" for c in to_coerce}) return obj - shifted def pct_change( self, periods: int = 1, fill_method: FillnaOptions = "ffill", limit=None, freq=None, axis: Axis = 0, ): """ Calculate pct_change of each value to previous entry in group. Returns ------- Series or DataFrame Percentage changes within each group. """ # TODO(GH#23918): Remove this conditional for SeriesGroupBy when # GH#23918 is fixed if freq is not None or axis != 0: f = lambda x: x.pct_change( periods=periods, fill_method=fill_method, limit=limit, freq=freq, axis=axis, ) return self._python_apply_general(f, self._selected_obj, is_transform=True) if fill_method is None: # GH30463 fill_method = "ffill" limit = 0 filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby( self.grouper.codes, axis=self.axis, group_keys=self.group_keys ) shifted = fill_grp.shift(periods=periods, freq=freq, axis=self.axis) return (filled / shifted) - 1 def head(self, n: int = 5) -> NDFrameT: """ Return first n rows of each group. Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows from the original DataFrame with original index and order preserved (``as_index`` flag is ignored). Parameters ---------- n : int If positive: number of entries to include from start of each group. If negative: number of entries to exclude from end of each group. Returns ------- Series or DataFrame Subset of original Series or DataFrame as determined by n. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], ... columns=['A', 'B']) >>> df.groupby('A').head(1) A B 0 1 2 2 5 6 >>> df.groupby('A').head(-1) A B 0 1 2 """ mask = self._make_mask_from_positional_indexer(slice(None, n)) return self._mask_selected_obj(mask) def tail(self, n: int = 5) -> NDFrameT: """ Return last n rows of each group. Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows from the original DataFrame with original index and order preserved (``as_index`` flag is ignored). 
Parameters ---------- n : int If positive: number of entries to include from end of each group. If negative: number of entries to exclude from start of each group. Returns ------- Series or DataFrame Subset of original Series or DataFrame as determined by n. %(see_also)s Examples -------- >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], ... columns=['A', 'B']) >>> df.groupby('A').tail(1) A B 1 a 2 3 b 2 >>> df.groupby('A').tail(-1) A B 1 a 2 3 b 2 """ if n: mask = self._make_mask_from_positional_indexer(slice(-n, None)) else: mask = self._make_mask_from_positional_indexer([]) return self._mask_selected_obj(mask) def _mask_selected_obj(self, mask: npt.NDArray[np.bool_]) -> NDFrameT: """ Return _selected_obj with mask applied to the correct axis. Parameters ---------- mask : np.ndarray[bool] Boolean mask to apply. Returns ------- Series or DataFrame Filtered _selected_obj. """ ids = self.grouper.group_info[0] mask = mask & (ids != -1) if self.axis == 0: return self._selected_obj[mask] else: return self._selected_obj.iloc[:, mask] def _reindex_output( self, output: OutputFrameOrSeries, fill_value: Scalar = np.NaN, qs: npt.NDArray[np.float64] | None = None, ) -> OutputFrameOrSeries: """ If we have categorical groupers, then we might want to make sure that we have a fully re-indexed output to the levels. This means expanding the output space to accommodate all values in the cartesian product of our groups, regardless of whether they were observed in the data or not. This will expand the output space if there are missing groups. The method returns early without modifying the input if the number of groupings is less than 2, self.observed == True or none of the groupers are categorical. Parameters ---------- output : Series or DataFrame Object resulting from grouping and applying an operation. fill_value : scalar, default np.NaN Value to use for unobserved categories if self.observed is False. qs : np.ndarray[float64] or None, default None quantile values, only relevant for quantile. Returns ------- Series or DataFrame Object (potentially) re-indexed to include all possible groups. """ groupings = self.grouper.groupings if len(groupings) == 1: return output # if we only care about the observed values # we are done elif self.observed: return output # reindexing only applies to a Categorical grouper elif not any( isinstance(ping.grouping_vector, (Categorical, CategoricalIndex)) for ping in groupings ): return output levels_list = [ping.group_index for ping in groupings] names = self.grouper.names if qs is not None: # error: Argument 1 to "append" of "list" has incompatible type # "ndarray[Any, dtype[floating[_64Bit]]]"; expected "Index" levels_list.append(qs) # type: ignore[arg-type] names = names + [None] index = MultiIndex.from_product(levels_list, names=names) if self.sort: index = index.sort_values() if self.as_index: # Always holds for SeriesGroupBy unless GH#36507 is implemented d = { self.obj._get_axis_name(self.axis): index, "copy": False, "fill_value": fill_value, } return output.reindex(**d) # type: ignore[arg-type] # GH 13204 # Here, the categorical in-axis groupers, which need to be fully # expanded, are columns in `output`. An idea is to do: # output = output.set_index(self.grouper.names) # .reindex(index).reset_index() # but special care has to be taken because of possible not-in-axis # groupers. # So, we manually select and drop the in-axis grouper columns, # reindex `output`, and then reset the in-axis grouper columns. 
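        # Illustration (hypothetical): with as_index=False and an in-axis
        # Categorical grouper "c", the "c" column is dropped here, the frame is
        # reindexed against the full cartesian product of group levels, and "c"
        # is then restored from the expanded index below.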
# Select in-axis groupers in_axis_grps = list( (i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis ) if len(in_axis_grps) > 0: g_nums, g_names = zip(*in_axis_grps) output = output.drop(labels=list(g_names), axis=1) # Set a temp index and reindex (possibly expanding) output = output.set_index(self.grouper.result_index).reindex( index, copy=False, fill_value=fill_value ) # Reset in-axis grouper columns # (using level numbers `g_nums` because level names may not be unique) if len(in_axis_grps) > 0: output = output.reset_index(level=g_nums) return output.reset_index(drop=True) def sample( self, n: int | None = None, frac: float | None = None, replace: bool = False, weights: Sequence | Series | None = None, random_state: RandomState | None = None, ): """ Return a random sample of items from each group. You can use `random_state` for reproducibility. .. versionadded:: 1.1.0 Parameters ---------- n : int, optional Number of items to return for each group. Cannot be used with `frac` and must be no larger than the smallest group unless `replace` is True. Default is one if `frac` is None. frac : float, optional Fraction of items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : list-like, optional Default None results in equal probability weighting. If passed a list-like then values must have the same length as the underlying DataFrame or Series object and will be used as sampling probabilities after normalization within each group. Values must be non-negative with at least one positive element within each group. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.4.0 np.random.Generator objects now accepted Returns ------- Series or DataFrame A new object of same type as caller containing items randomly sampled within each group from the caller object. See Also -------- DataFrame.sample: Generate random samples from a DataFrame object. numpy.random.choice: Generate a random sample from a given 1-D numpy array. Examples -------- >>> df = pd.DataFrame( ... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)} ... ) >>> df a b 0 red 0 1 red 1 2 blue 2 3 blue 3 4 black 4 5 black 5 Select one row at random for each distinct value in column a. The `random_state` argument can be used to guarantee reproducibility: >>> df.groupby("a").sample(n=1, random_state=1) a b 4 black 4 2 blue 2 1 red 1 Set `frac` to sample fixed proportions rather than counts: >>> df.groupby("a")["b"].sample(frac=0.5, random_state=2) 5 5 2 2 0 0 Name: b, dtype: int64 Control sample probabilities within groups by setting weights: >>> df.groupby("a").sample( ... n=1, ... weights=[1, 1, 1, 0, 0, 1], ... random_state=1, ... 
) a b 5 black 5 2 blue 2 0 red 0 """ # noqa:E501 if self._selected_obj.empty: # GH48459 prevent ValueError when object is empty return self._selected_obj size = sample.process_sampling_size(n, frac, replace) if weights is not None: weights_arr = sample.preprocess_weights( self._selected_obj, weights, axis=self.axis ) random_state = com.random_state(random_state) group_iterator = self.grouper.get_iterator(self._selected_obj, self.axis) sampled_indices = [] for labels, obj in group_iterator: grp_indices = self.indices[labels] group_size = len(grp_indices) if size is not None: sample_size = size else: assert frac is not None sample_size = round(frac * group_size) grp_sample = sample.sample( group_size, size=sample_size, replace=replace, weights=None if weights is None else weights_arr[grp_indices], random_state=random_state, ) sampled_indices.append(grp_indices[grp_sample]) sampled_indices = np.concatenate(sampled_indices) return self._selected_obj.take(sampled_indices, axis=self.axis) AxisInt = int class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. 
>>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, 
axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. 
Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. """ from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. 
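        For example (an illustrative sketch; the concrete array classes are
        internal details), a frame backed by a single datetime64 block comes
        back as a 2D ``DatetimeArray`` rather than a plain object ndarray:

        >>> df = pd.DataFrame({"A": pd.date_range("2000-01-01", periods=2)})
        >>> type(df._values).__name__
        'DatetimeArray'
        >>> df._values.ndim
        2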
""" mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. 
val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. 
%(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. 
Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. 
        Notes
        -----
        The dimensions of DataFrame and other must be compatible in order
        to compute the matrix multiplication. In addition, the column names of
        DataFrame and the index of other must contain the same values, as they
        will be aligned prior to the multiplication.

        The dot method for Series computes the inner product, instead of the
        matrix product here.

        Examples
        --------
        Here we multiply a DataFrame with a Series.

        >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        >>> s = pd.Series([1, 1, 2, 1])
        >>> df.dot(s)
        0    -4
        1     5
        dtype: int64

        Here we multiply a DataFrame with another DataFrame.

        >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(other)
           0  1
        0  1  4
        1  2  2

        Note that the dot method gives the same result as @

        >>> df @ other
           0  1
        0  1  4
        1  2  2

        The dot method also works if other is an np.array.

        >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(arr)
           0  1
        0  1  4
        1  2  2

        Note how shuffling of the objects does not change the result.

        >>> s2 = s.reindex([1, 0, 2, 3])
        >>> df.dot(s2)
        0    -4
        1     5
        dtype: int64
        """
        if isinstance(other, (Series, DataFrame)):
            common = self.columns.union(other.index)
            if len(common) > len(self.columns) or len(common) > len(other.index):
                raise ValueError("matrices are not aligned")

            left = self.reindex(columns=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right._values
        else:
            left = self
            lvals = self.values
            rvals = np.asarray(other)
            if lvals.shape[1] != rvals.shape[0]:
                raise ValueError(
                    f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
                )

        if isinstance(other, DataFrame):
            return self._constructor(
                np.dot(lvals, rvals),
                index=left.index,
                columns=other.columns,
                copy=False,
            )
        elif isinstance(other, Series):
            return self._constructor_sliced(
                np.dot(lvals, rvals), index=left.index, copy=False
            )
        elif isinstance(rvals, (np.ndarray, Index)):
            result = np.dot(lvals, rvals)
            if result.ndim == 2:
                return self._constructor(result, index=left.index, copy=False)
            else:
                return self._constructor_sliced(result, index=left.index, copy=False)
        else:  # pragma: no cover
            raise TypeError(f"unsupported type: {type(other)}")

    def __matmul__(self, other: Series) -> Series:
        ...

    def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
        ...

    def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        return self.dot(other)

    def __rmatmul__(self, other) -> DataFrame:
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        try:
            return self.T.dot(np.transpose(other)).T
        except ValueError as err:
            if "shape mismatch" not in str(err):
                raise
            # GH#21581 give exception message for original shapes
            msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
            raise ValueError(msg) from err

    # ----------------------------------------------------------------------
    # IO methods (to / from other formats)

    def from_dict(
        cls,
        data: dict,
        orient: str = "columns",
        dtype: Dtype | None = None,
        columns: Axes | None = None,
    ) -> DataFrame:
        """
        Construct DataFrame from dict of array-like or dicts.

        Creates DataFrame object from dictionary by columns or by index
        allowing dtype specification.

        Parameters
        ----------
        data : dict
            Of the form {field : array-like} or {field : dict}.
        orient : {'columns', 'index', 'tight'}, default 'columns'
            The "orientation" of the data.
            If the keys of the passed dict should be the columns of the resulting
            DataFrame, pass 'columns' (default). Otherwise if the keys
            should be rows, pass 'index'.
If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. 
            Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the dtypes of the DataFrame columns.

            .. versionadded:: 1.1.0

        Returns
        -------
        numpy.ndarray

        See Also
        --------
        Series.to_numpy : Similar method for Series.

        Examples
        --------
        >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
        array([[1, 3],
               [2, 4]])

        With heterogeneous data, the lowest common type will have to
        be used.

        >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
        >>> df.to_numpy()
        array([[1. , 3. ],
               [2. , 4.5]])

        For a mix of numeric and non-numeric types, the output array will
        have object dtype.

        >>> df['C'] = pd.date_range('2000', periods=2)
        >>> df.to_numpy()
        array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
               [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
        """
        if dtype is not None:
            dtype = np.dtype(dtype)
        result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value)
        if result.dtype is not dtype:
            result = np.array(result, dtype=dtype, copy=False)

        return result

    def _create_data_for_split_and_tight_to_dict(
        self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int]
    ) -> list:
        """
        Simple helper method to create the main output data for
        ``to_dict(orient="split")`` and ``to_dict(orient="tight")``.
        """
        if are_all_object_dtype_cols:
            data = [
                list(map(maybe_box_native, t))
                for t in self.itertuples(index=False, name=None)
            ]
        else:
            data = [list(t) for t in self.itertuples(index=False, name=None)]
            if object_dtype_indices:
                # If we have object_dtype_cols, apply maybe_box_native after
                # the list comprehension for perf
                for row in data:
                    for i in object_dtype_indices:
                        row[i] = maybe_box_native(row[i])
        return data

    def to_dict(
        self,
        orient: Literal["dict", "list", "series", "split", "tight", "index"] = ...,
        into: type[dict] = ...,
    ) -> dict:
        ...

    def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]:
        ...

    def to_dict(
        self,
        orient: Literal[
            "dict", "list", "series", "split", "tight", "records", "index"
        ] = "dict",
        into: type[dict] = dict,
        index: bool = True,
    ) -> dict | list[dict]:
        """
        Convert the DataFrame to a dictionary.

        The type of the key-value pairs can be customized with the parameters
        (see below).

        Parameters
        ----------
        orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'}
            Determines the type of the values of the dictionary.

            - 'dict' (default) : dict like {column -> {index -> value}}
            - 'list' : dict like {column -> [values]}
            - 'series' : dict like {column -> Series(values)}
            - 'split' : dict like
              {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
            - 'tight' : dict like
              {'index' -> [index], 'columns' -> [columns], 'data' -> [values],
              'index_names' -> [index.names], 'column_names' -> [column.names]}
            - 'records' : list like
              [{column -> value}, ... , {column -> value}]
            - 'index' : dict like {index -> {column -> value}}

            .. versionadded:: 1.4.0
                'tight' as an allowed value for the ``orient`` argument

        into : class, default dict
            The collections.abc.Mapping subclass used for all Mappings
            in the return value. Can be the actual class or an empty
            instance of the mapping type you want. If you want a
            collections.defaultdict, you must pass it initialized.

        index : bool, default True
            Whether to include the index item (and index_names item if `orient`
            is 'tight') in the returned dictionary. Can only be ``False``
            when `orient` is 'split' or 'tight'.
.. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. 
_console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... 
dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... {'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if 
any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. 
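            # (for example, with a single index level named "I" and columns
            # ["A", "B"], the collected order is [I_values, A_values, B_values]
            # and names is ["I", "A", "B"]).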
# # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. 
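        For example (an illustrative sketch; the file name is a placeholder
        and ``'td'`` is the day-level date format documented under
        ``convert_dates`` below):

        >>> df = pd.DataFrame({"date": pd.date_range("2020-01-01", periods=2)})
        >>> df.to_stata("dates.dta", convert_dates={"date": "td"})  # doctest: +SKIP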
Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
        >>> print(df.to_markdown(tablefmt="grid"))
        +----+------------+------------+
        |    | animal_1   | animal_2   |
        +====+============+============+
        |  0 | elk        | dog        |
        +----+------------+------------+
        |  1 | pig        | quetzal    |
        +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If None, the result is
            returned as bytes. If a string or path, it will be used as Root Directory
            path when writing a partitioned dataset.

            .. versionchanged:: 1.2.0

            Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
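    * Passing ``path=None`` returns the serialized frame as ``bytes`` rather
      than writing a file; the last example below round-trips through
      ``io.BytesIO`` this way.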
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
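        # Illustrative sketch (hypothetical frame, not from the test suite):
        #     df = pd.DataFrame({"a": [1, 2, 3]})
        #     df[df["a"] > 1]                    # boolean Series -> rows 1 and 2
        #     df[np.array([True, False, True])]  # raw boolean mask -> rows 0 and 2
        # A mask whose length does not match the index raises in
        # _getitem_bool_array below.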
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
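        Examples
        --------
        A minimal illustrative sketch (hypothetical frame, not taken from the
        pandas docs); position 1 here is column ``"b"``:

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(1, [30, 40])
        >>> df
           a   b
        0  1  30
        1  2  40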
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
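        For example (an illustrative sketch, not taken from the pandas docs),
        a Series value is aligned on the index rather than assigned by position:

        >>> df = pd.DataFrame({"a": [1, 2, 3]}, index=["x", "y", "z"])
        >>> df["b"] = pd.Series([10, 20], index=["z", "x"])
        >>> df
           a     b
        x  1  20.0
        y  2   NaN
        z  3  10.0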
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns')
   I  II
0  1   4
1  2   5
2  3   6
"""
    )

    # ----------------------------------------------------------------------
    # Reindex-based selection methods

    # ----------------------------------------------------------------------
    # Sorting

    # error: Signature of "sort_values" incompatible with supertype "NDFrame"
    # TODO: Just move the sort_values doc here.

    # ----------------------------------------------------------------------
    # Arithmetic Methods

    # ----------------------------------------------------------------------
    # Function application

    # error: Signature of "any" incompatible with supertype "NDFrame" [override]
    # error: Missing return statement

    # ----------------------------------------------------------------------
    # Merging / joining methods

    # ----------------------------------------------------------------------
    # Statistical methods, etc.

    # ----------------------------------------------------------------------
    # ndarray-like stats methods

    # ----------------------------------------------------------------------
    # Add index and columns

    # ----------------------------------------------------------------------
    # Add plotting methods to DataFrame

    # ----------------------------------------------------------------------
    # Internal Interface Methods


class NDFrame(PandasObject, indexing.IndexingMixin):
    """
    N-dimensional analogue of DataFrame. Store multi-dimensional in a
    size-mutable, labeled data structure

    Parameters
    ----------
    data : BlockManager
    axes : list
    copy : bool, default False
    """

    _internal_names: list[str] = [
        "_mgr",
        "_cacher",
        "_item_cache",
        "_cache",
        "_is_copy",
        "_subtyp",
        "_name",
        "_default_kind",
        "_default_fill_value",
        "_metadata",
        "__array_struct__",
        "__array_interface__",
        "_flags",
    ]
    _internal_names_set: set[str] = set(_internal_names)
    _accessors: set[str] = set()
    _hidden_attrs: frozenset[str] = frozenset([])
    _metadata: list[str] = []
    _is_copy: weakref.ReferenceType[NDFrame] | None = None
    _mgr: Manager
    _attrs: dict[Hashable, Any]
    _typ: str

    # ----------------------------------------------------------------------
    # Constructors

    def __init__(
        self,
        data: Manager,
        copy: bool_t = False,
        attrs: Mapping[Hashable, Any] | None = None,
    ) -> None:
        # copy kwarg is retained for mypy compat, is not used
        object.__setattr__(self, "_is_copy", None)
        object.__setattr__(self, "_mgr", data)
        object.__setattr__(self, "_item_cache", {})
        if attrs is None:
            attrs = {}
        else:
            attrs = dict(attrs)
        object.__setattr__(self, "_attrs", attrs)
        object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))

    @classmethod
    def _init_mgr(
        cls,
        mgr: Manager,
        axes,
        dtype: Dtype | None = None,
        copy: bool_t = False,
    ) -> Manager:
        """passed a manager and a axes dict"""
        for a, axe in axes.items():
            if axe is not None:
                axe = ensure_index(axe)
                bm_axis = cls._get_block_manager_axis(a)
                mgr = mgr.reindex_axis(axe, axis=bm_axis)

        # make a copy if explicitly requested
        if copy:
            mgr = mgr.copy()
        if dtype is not None:
            # avoid further copies if we can
            if (
                isinstance(mgr, BlockManager)
                and len(mgr.blocks) == 1
                and is_dtype_equal(mgr.blocks[0].values.dtype, dtype)
            ):
                pass
            else:
                mgr = mgr.astype(dtype=dtype)
        return mgr

    def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT:
        """
        Private helper function to create a DataFrame with specific manager.
Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. """ if self._attrs is None: self._attrs = {} return self._attrs def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. 
Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. 
DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. """ from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. 
copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit in case of having a single block, otherwise a # copy is made, we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... 
], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. 
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... 
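
    # The three ``rename_axis`` signatures above are typing-only overloads,
    # distinguished by the literal value of ``inplace``; the definition that
    # follows is the single implementation that runs at runtime.
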
def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. 
Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. 
numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. 
Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. 
axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." 
) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. """ return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. 
Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional recursion # e.g. 
say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache: dict[Hashable, Series] = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods klass="object", storage_options=_shared_docs["storage_options"], storage_options_versionadded="1.2.0", ) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True, index: bool_t = True, index_label: IndexLabel = None, startrow: int = 0, startcol: int = 0, engine: str | None = None, merge_cells: bool_t = True, inf_rep: str = "inf", freeze_panes: tuple[int, int] | None = None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. 
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string. 
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. 
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
            None is returned if the callable passed into ``method`` does not
            return an integer number of rows.

            The number of returned rows affected is the sum of the ``rowcount``
            attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable, which
            may not reflect the exact number of written rows as stipulated in
            the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__
            or `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__
            documentation.

            .. versionadded:: 1.4.0

        Raises
        ------
        ValueError
            When the table already exists and `if_exists` is 'fail' (the
            default).

        See Also
        --------
        read_sql : Read a DataFrame from a table.

        Notes
        -----
        Timezone aware datetime columns will be written as
        ``Timestamp with timezone`` type with SQLAlchemy if supported by the
        database. Otherwise, the datetimes will be stored as timezone unaware
        timestamps local to the original timezone.

        References
        ----------
        .. [1] https://docs.sqlalchemy.org
        .. [2] https://www.python.org/dev/peps/pep-0249/

        Examples
        --------
        Create an in-memory SQLite database.

        >>> from sqlalchemy import create_engine
        >>> engine = create_engine('sqlite://', echo=False)

        Create a table from scratch with 3 rows.

        >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
        >>> df
             name
        0  User 1
        1  User 2
        2  User 3

        >>> df.to_sql('users', con=engine)
        3
        >>> from sqlalchemy import text
        >>> with engine.connect() as conn:
        ...     conn.execute(text("SELECT * FROM users")).fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]

        An `sqlalchemy.engine.Connection` can also be passed to `con`:

        >>> with engine.begin() as connection:
        ...     df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
        ...     df1.to_sql('users', con=connection, if_exists='append')
        2

        This is allowed to support operations that require that the same
        DBAPI connection is used for the entire operation.

        >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
        >>> df2.to_sql('users', con=engine, if_exists='append')
        2
        >>> with engine.connect() as conn:
        ...     conn.execute(text("SELECT * FROM users")).fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
         (0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
         (1, 'User 7')]

        Overwrite the table with just ``df2``.

        >>> df2.to_sql('users', con=engine, if_exists='replace',
        ...            index_label='id')
        2
        >>> with engine.connect() as conn:
        ...     conn.execute(text("SELECT * FROM users")).fetchall()
        [(0, 'User 6'), (1, 'User 7')]

        Specify the dtype (especially useful for integers with missing
        values). Notice that while pandas is forced to store the data as
        floating point, the database supports nullable integers. When fetching
        the data with Python, we get back integer scalars.

        >>> df = pd.DataFrame({"A": [1, None, 2]})
        >>> df
             A
        0  1.0
        1  NaN
        2  2.0

        >>> from sqlalchemy.types import Integer
        >>> df.to_sql('integers', con=engine, index=False,
        ...           dtype={"A": Integer()})
        3

        >>> with engine.connect() as conn:
        ...     conn.execute(text("SELECT * FROM integers")).fetchall()
        [(1,), (None,), (2,)]
        """  # noqa:E501
        from pandas.io import sql

        return sql.to_sql(
            self,
            name,
            con,
            schema=schema,
            if_exists=if_exists,
            index=index,
            index_label=index_label,
            chunksize=chunksize,
            dtype=dtype,
            method=method,
        )

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path",
    )
    def to_pickle(
        self,
        path: FilePath | WriteBuffer[bytes],
        compression: CompressionOptions = "infer",
        protocol: int = pickle.HIGHEST_PROTOCOL,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Pickle (serialize) object to file.
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
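    # A minimal usage sketch (illustrative only, not part of the original
    # module): the two overload signatures above encode the buf-dependent
    # return type of ``to_latex`` -- with ``buf=None`` the rendered table is
    # returned as ``str``, while a path or writable buffer writes the file
    # and returns ``None``.
    #
    #   df = pd.DataFrame({"x": [1.0, 2.5]})
    #   tex = df.to_latex()          # buf=None -> str
    #   df.to_latex("table.tex")     # path-like -> None, file written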
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
            relabel_index=relabel_index_,
            format={"formatter": formatters_, **base_format_},
            format_index=format_index_,
            render_kwargs=render_kwargs_,
        )

    def _to_latex_via_styler(
        self,
        buf=None,
        *,
        hide: dict | list[dict] | None = None,
        relabel_index: dict | list[dict] | None = None,
        format: dict | list[dict] | None = None,
        format_index: dict | list[dict] | None = None,
        render_kwargs: dict | None = None,
    ):
        """
        Render object to a LaTeX tabular, longtable, or nested table.

        Uses the ``Styler`` implementation with the following, ordered, method
        chaining:

        .. code-block:: python

           styler = Styler(DataFrame)
           styler.hide(**hide)
           styler.relabel_index(**relabel_index)
           styler.format(**format)
           styler.format_index(**format_index)
           styler.to_latex(buf=buf, **render_kwargs)

        Parameters
        ----------
        buf : str, Path or StringIO-like, optional, default None
            Buffer to write to. If None, the output is returned as a string.
        hide : dict, list of dict
            Keyword args to pass to the method call of ``Styler.hide``. If a
            list will call the method numerous times.
        relabel_index : dict, list of dict
            Keyword args to pass to the method of ``Styler.relabel_index``.
            If a list will call the method numerous times.
        format : dict, list of dict
            Keyword args to pass to the method call of ``Styler.format``. If
            a list will call the method numerous times.
        format_index : dict, list of dict
            Keyword args to pass to the method call of
            ``Styler.format_index``. If a list will call the method numerous
            times.
        render_kwargs : dict
            Keyword args to pass to the method call of ``Styler.to_latex``.

        Returns
        -------
        str or None
            If buf is None, returns the result as a string. Otherwise returns
            None.
        """
        from pandas.io.formats.style import Styler

        self = cast("DataFrame", self)
        styler = Styler(self, uuid="")

        for kw_name in ["hide", "relabel_index", "format", "format_index"]:
            kw = vars()[kw_name]
            if isinstance(kw, dict):
                getattr(styler, kw_name)(**kw)
            elif isinstance(kw, list):
                for sub_kw in kw:
                    getattr(styler, kw_name)(**sub_kw)

        # bold_rows is not a direct kwarg of Styler.to_latex
        render_kwargs = {} if render_kwargs is None else render_kwargs
        if render_kwargs.pop("bold_rows"):
            styler.applymap_index(lambda v: "textbf:--rwrap;")

        return styler.to_latex(buf=buf, **render_kwargs)

    @overload
    def to_csv(
        self,
        path_or_buf: None = ...,
        sep: str = ...,
        na_rep: str = ...,
        float_format: str | Callable | None = ...,
        columns: Sequence[Hashable] | None = ...,
        header: bool_t | list[str] = ...,
        index: bool_t = ...,
        index_label: IndexLabel | None = ...,
        mode: str = ...,
        encoding: str | None = ...,
        compression: CompressionOptions = ...,
        quoting: int | None = ...,
        quotechar: str = ...,
        lineterminator: str | None = ...,
        chunksize: int | None = ...,
        date_format: str | None = ...,
        doublequote: bool_t = ...,
        escapechar: str | None = ...,
        decimal: str = ...,
        errors: str = ...,
        storage_options: StorageOptions = ...,
    ) -> str:
        ...

    @overload
    def to_csv(
        self,
        path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str],
        sep: str = ...,
        na_rep: str = ...,
        float_format: str | Callable | None = ...,
        columns: Sequence[Hashable] | None = ...,
        header: bool_t | list[str] = ...,
        index: bool_t = ...,
        index_label: IndexLabel | None = ...,
        mode: str = ...,
        encoding: str | None = ...,
        compression: CompressionOptions = ...,
        quoting: int | None = ...,
        quotechar: str = ...,
        lineterminator: str | None = ...,
        chunksize: int | None = ...,
        date_format: str | None = ...,
        doublequote: bool_t = ...,
        escapechar: str | None = ...,
        decimal: str = ...,
        errors: str = ...,
        storage_options: StorageOptions = ...,
    ) -> None:
        ...

    @doc(
storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | Callable | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, Callable, default None Format string for floating point numbers. If a Callable is given, it takes precedence over other numeric formatting parameters, like decimal. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str, default 'w' Python write mode. The available write modes are the same as :py:func:`open`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. 
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
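        inplace : bool, default False
            Whether the triggering modification was made inplace. The base
            implementation here does not use it; it is consumed by subclass
            overrides.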
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError("list keys are not supported in xs, pass a tuple instead") if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_mgr, name=self.index[loc] ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. 
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
        ...                                             '2014-02-15']))
        >>> df2
                    temp_celsius windspeed
        2014-02-12          28.0       low
        2014-02-13          30.0       low
        2014-02-15          35.1    medium

        >>> df2.reindex_like(df1)
                    temp_celsius  temp_fahrenheit windspeed
        2014-02-12          28.0              NaN       low
        2014-02-13          30.0              NaN       low
        2014-02-14           NaN              NaN       NaN
        2014-02-15          35.1              NaN    medium
        """
        d = other._construct_axes_dict(
            axes=self._AXIS_ORDERS,
            method=method,
            copy=copy,
            limit=limit,
            tolerance=tolerance,
        )

        return self.reindex(**d)

    @overload
    def drop(
        self,
        labels: IndexLabel = ...,
        *,
        axis: Axis = ...,
        index: IndexLabel = ...,
        columns: IndexLabel = ...,
        level: Level | None = ...,
        inplace: Literal[True],
        errors: IgnoreRaise = ...,
    ) -> None:
        ...

    @overload
    def drop(
        self: NDFrameT,
        labels: IndexLabel = ...,
        *,
        axis: Axis = ...,
        index: IndexLabel = ...,
        columns: IndexLabel = ...,
        level: Level | None = ...,
        inplace: Literal[False] = ...,
        errors: IgnoreRaise = ...,
    ) -> NDFrameT:
        ...

    @overload
    def drop(
        self: NDFrameT,
        labels: IndexLabel = ...,
        *,
        axis: Axis = ...,
        index: IndexLabel = ...,
        columns: IndexLabel = ...,
        level: Level | None = ...,
        inplace: bool_t = ...,
        errors: IgnoreRaise = ...,
    ) -> NDFrameT | None:
        ...

    def drop(
        self: NDFrameT,
        labels: IndexLabel = None,
        *,
        axis: Axis = 0,
        index: IndexLabel = None,
        columns: IndexLabel = None,
        level: Level | None = None,
        inplace: bool_t = False,
        errors: IgnoreRaise = "raise",
    ) -> NDFrameT | None:
        inplace = validate_bool_kwarg(inplace, "inplace")

        if labels is not None:
            if index is not None or columns is not None:
                raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
            axis_name = self._get_axis_name(axis)
            axes = {axis_name: labels}
        elif index is not None or columns is not None:
            axes = {"index": index}
            if self.ndim == 2:
                axes["columns"] = columns
        else:
            raise ValueError(
                "Need to specify at least one of 'labels', 'index' or 'columns'"
            )

        obj = self

        for axis, labels in axes.items():
            if labels is not None:
                obj = obj._drop_axis(labels, axis, level=level, errors=errors)

        if inplace:
            self._update_inplace(obj)
            return None
        else:
            return obj

    def _drop_axis(
        self: NDFrameT,
        labels,
        axis,
        level=None,
        errors: IgnoreRaise = "raise",
        only_slice: bool_t = False,
    ) -> NDFrameT:
        """
        Drop labels from specified axis. Used in the ``drop`` method
        internally.

        Parameters
        ----------
        labels : single label or list-like
        axis : int or axis name
        level : int or level name, default None
            For MultiIndex
        errors : {'ignore', 'raise'}, default 'raise'
            If 'ignore', suppress error and existing labels are dropped.
        only_slice : bool, default False
            Whether indexing along columns should be view-only.
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
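    # A rough usage sketch of ``sort_index`` (hypothetical example data): the
    # object is ordered by its axis labels rather than by its values, and a new
    # object is returned unless ``inplace=True``.
    #
    #   >>> df = pd.DataFrame({"a": [1, 2, 3]}, index=[2, 0, 1])
    #   >>> df.sort_index()
    #      a
    #   0  2
    #   1  3
    #   2  1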
def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT | None: ... def sort_index( self: NDFrameT, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool_t = True, ignore_index: bool_t = False, key: IndexKeyFunc = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer( target, level, ascending, kind, na_position, sort_remaining, key ) if indexer is None: if inplace: result = self else: result = self.copy(deep=None) if ignore_index: result.index = default_index(len(self)) if inplace: return None else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 new_data.set_axis(axis, default_index(len(indexer))) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") klass=_shared_doc_kwargs["klass"], optional_reindex="", ) def reindex( self: NDFrameT, labels=None, index=None, columns=None, axis: Axis | None = None, method: str | None = None, copy: bool_t | None = None, level: Level | None = None, fill_value: Scalar | None = np.nan, limit: int | None = None, tolerance=None, ) -> NDFrameT: """ Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- {optional_reindex} method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: Propagate last valid observation forward to next valid. * backfill / bfill: Use next valid observation to fill gap. * nearest: Use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- {klass} with changed index. 
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
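            # The manager-level reindex receives the target labels together with
            # a positional indexer; positions of -1 (labels not present on the
            # existing axis) are filled with ``fill_value`` instead of raising.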
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
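        # Fallback order below: internal names and _metadata entries are set as
        # plain attributes; otherwise, if the existing attribute is an axis
        # (Index) it is overwritten directly, a name found on the info axis is
        # assigned as a column/label via ``self[name] = value``, and anything
        # else becomes an ordinary instance attribute (with a warning when a
        # list-like is attached to a DataFrame this way).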
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f() -> None: self._mgr = self._mgr.consolidate() self._protect_consolidate(f) def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value) or value is lib.no_default: return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True def _get_numeric_data(self: NDFrameT) -> NDFrameT: return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def values(self): raise AbstractMethodError(self) def _values(self) -> ArrayLike: """internal implementation""" raise AbstractMethodError(self) def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Parameters ---------- copy : bool, default True Whether to make a copy for non-object or non-inferrable columns or Series. Returns ------- same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ new_mgr = self._mgr.convert(copy=copy) return self._constructor(new_mgr).__finalize__(self, method="infer_objects") def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, dtype_backend: DtypeBackend = "numpy_nullable", ) -> NDFrameT: """ Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. .. versionadded:: 1.2.0 dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable" Which dtype_backend to use, e.g. whether a DataFrame should use nullable dtypes for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- Series or DataFrame Copy of input object with new dtype. See Also -------- infer_objects : Infer dtypes of objects. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. 
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy(deep=None) from pandas import Series value = Series(value) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) if using_copy_on_write(): result = self.copy(deep=None) else: result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( downcast if not is_dict else downcast.get(k) # type: ignore[union-attr] ) res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) if not inplace: result[k] = res_k else: # We can write into our existing column(s) iff dtype # was preserved. if isinstance(res_k, ABCSeries): # i.e. 'k' only shows up once in self.columns if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: # Different dtype -> no way to do inplace. 
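                            # Assigning through ``__setitem__`` replaces the
                            # whole column with the newly filled values rather
                            # than writing into the existing block's array.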
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def backfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. .. deprecated:: 2.0 {klass}.backfill is deprecated. Use {klass}.bfill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.backfill/Series.backfill is deprecated. Use " "DataFrame.bfill/Series.bfill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: Literal[False] = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT: ... def replace( self, to_replace=..., value=..., *, inplace: Literal[True], limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> None: ... def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: bool_t = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT | None: ... _shared_docs["replace"], klass=_shared_doc_kwargs["klass"], inplace=_shared_doc_kwargs["inplace"], replace_iloc=_shared_doc_kwargs["replace_iloc"], ) def replace( self: NDFrameT, to_replace=None, value=lib.no_default, *, inplace: bool_t = False, limit: int | None = None, regex: bool_t = False, method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default, ) -> NDFrameT | None: if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is lib.no_default or method is not lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. We have the corner case where the user explicitly # passes value=None *and* a method, which we interpret as meaning # they want the (documented) default behavior. if method is lib.no_default: # TODO: get this to show up as the default in the docs? 
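                # With no explicit method, fall back to forward-fill ("pad"):
                # entries matching ``to_replace`` are treated as missing and
                # filled from the previous valid value.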
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) fillna_methods = ["ffill", "bfill", "pad", "backfill"] should_transpose = axis == 1 and method not in fillna_methods obj = self.T if should_transpose else self if obj.empty: return self.copy() if method not in fillna_methods: axis = self._info_axis_number if isinstance(obj.index, MultiIndex) and method != "linear": raise ValueError( "Only `method=linear` interpolation is supported on MultiIndexes." ) # Set `limit_direction` depending on `method` if limit_direction is None: limit_direction = ( "backward" if method in ("backfill", "bfill") else "forward" ) else: if method in ("pad", "ffill") and limit_direction != "forward": raise ValueError( f"`limit_direction` must be 'forward' for method `{method}`" ) if method in ("backfill", "bfill") and limit_direction != "backward": raise ValueError( f"`limit_direction` must be 'backward' for method `{method}`" ) if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype." ) # create/use the index if method == "linear": # prior default index = Index(np.arange(len(obj.index))) else: index = obj.index methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index.dtype) or is_datetime64_any_dtype(index.dtype) or is_timedelta64_dtype(index.dtype) ) if method not in methods and not is_numeric_or_datetime: raise ValueError( "Index column must be numeric or datetime type when " f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " "interpolating." ) if isna(index).any(): raise NotImplementedError( "Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating." ) new_data = obj._mgr.interpolate( method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs, ) result = self._constructor(new_data) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. 
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
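        For example, ``obj.where(cond, lambda x: x + 1)`` evaluates the
        callable against ``obj`` and uses the result, whereas passing the same
        callable to ``_where`` would treat the callable object itself as the
        replacement value.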
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self: NDFrameT, cond, other=np.nan, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). If not specified, entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. For `Series` this parameter is unused and defaults to 0. level : int, default None Alignment level if needed. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. If the axis of ``other`` does not align with axis of ``cond`` {klass}, the misaligned index positions will be filled with {cond_rev}. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. The dtype of the object takes precedence. The fill value is casted to the object's dtype, if this can be done losslessly. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s = pd.Series(range(5)) >>> t = pd.Series([True, False]) >>> s.where(t, 99) 0 0 1 99 2 99 3 99 4 99 dtype: int64 >>> s.mask(t, 99) 0 99 1 1 2 99 3 99 4 99 dtype: int64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = common.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level) def mask( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def mask( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... 
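
    # ``mask`` is the boolean complement of ``where``: ``obj.mask(cond, other)``
    # is equivalent to ``obj.where(~cond, other)``; the concrete
    # implementation below simply inverts ``cond`` and delegates.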
@overload
    def mask(
        self: NDFrameT,
        cond,
        other=...,
        *,
        inplace: bool_t = ...,
        axis: Axis | None = ...,
        level: Level = ...,
    ) -> NDFrameT | None:
        ...

    @doc(
        where,
        klass=_shared_doc_kwargs["klass"],
        cond="False",
        cond_rev="True",
        name="mask",
        name_other="where",
    )
    def mask(
        self: NDFrameT,
        cond,
        other=lib.no_default,
        *,
        inplace: bool_t = False,
        axis: Axis | None = None,
        level: Level = None,
    ) -> NDFrameT | None:
        inplace = validate_bool_kwarg(inplace, "inplace")
        cond = common.apply_if_callable(cond, self)

        # see gh-21891
        if not hasattr(cond, "__invert__"):
            cond = np.array(cond)

        return self.where(
            ~cond,
            other=other,
            inplace=inplace,
            axis=axis,
            level=level,
        )

    @doc(klass=_shared_doc_kwargs["klass"])
    def shift(
        self: NDFrameT,
        periods: int = 1,
        freq=None,
        axis: Axis = 0,
        fill_value: Hashable = None,
    ) -> NDFrameT:
        """
        Shift index by desired number of periods with an optional time `freq`.

        When `freq` is not passed, shift the index without realigning the data.
        If `freq` is passed (in this case, the index must be date or datetime,
        or it will raise a `NotImplementedError`), the index will be
        increased using the periods and the `freq`. `freq` can be inferred
        when specified as "infer" as long as either freq or inferred_freq
        attribute is set in the index.

        Parameters
        ----------
        periods : int
            Number of periods to shift. Can be positive or negative.
        freq : DateOffset, tseries.offsets, timedelta, or str, optional
            Offset to use from the tseries module or time rule (e.g. 'EOM').
            If `freq` is specified then the index values are shifted but the
            data is not realigned. That is, use `freq` if you would like to
            extend the index when shifting and preserve the original data.
            If `freq` is specified as "infer" then it will be inferred from
            the freq or inferred_freq attributes of the index. If neither of
            those attributes exist, a ValueError is thrown.
        axis : {{0 or 'index', 1 or 'columns', None}}, default 0
            Shift direction. For `Series` this parameter is unused and
            defaults to 0.
        fill_value : object, optional
            The scalar value to use for newly introduced missing values.
            The default depends on the dtype of `self`.
            For numeric data, ``np.nan`` is used.
            For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
            For extension dtypes, ``self.dtype.na_value`` is used.

            .. versionchanged:: 1.1.0

        Returns
        -------
        {klass}
            Copy of input object, shifted.

        See Also
        --------
        Index.shift : Shift values of Index.
        DatetimeIndex.shift : Shift values of DatetimeIndex.
        PeriodIndex.shift : Shift values of PeriodIndex.

        Examples
        --------
        >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
        ...                    "Col2": [13, 23, 18, 33, 48],
        ...                    "Col3": [17, 27, 22, 37, 52]}},
        ...
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
        >>> s.tz_convert(None)
        2018-09-14 23:30:00    1
        dtype: int64
        """
        axis = self._get_axis_number(axis)
        ax = self._get_axis(axis)

        def _tz_convert(ax, tz):
            if not hasattr(ax, "tz_convert"):
                if len(ax) > 0:
                    ax_name = self._get_axis_name(axis)
                    raise TypeError(
                        f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
                    )
                ax = DatetimeIndex([], tz=tz)
            else:
                ax = ax.tz_convert(tz)
            return ax

        # if a level is given it must be a MultiIndex level or
        # equivalent to the axis name
        if isinstance(ax, MultiIndex):
            level = ax._get_level_number(level)
            new_level = _tz_convert(ax.levels[level], tz)
            ax = ax.set_levels(new_level, level=level)
        else:
            if level not in (None, 0, ax.name):
                raise ValueError(f"The level {level} is not valid")
            ax = _tz_convert(ax, tz)

        result = self.copy(deep=copy and not using_copy_on_write())
        result = result.set_axis(ax, axis=axis, copy=False)
        return result.__finalize__(self, method="tz_convert")

    @doc(klass=_shared_doc_kwargs["klass"])
    def tz_localize(
        self: NDFrameT,
        tz,
        axis: Axis = 0,
        level=None,
        copy: bool_t | None = None,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> NDFrameT:
        """
        Localize tz-naive index of a Series or DataFrame to target time zone.

        This operation localizes the Index. To localize the values in a
        timezone-naive Series, use :meth:`Series.dt.tz_localize`.

        Parameters
        ----------
        tz : str or tzinfo or None
            Time zone to localize. Passing ``None`` will remove the
            time zone information and preserve local time.
        axis : {{0 or 'index', 1 or 'columns'}}, default 0
            The axis to localize.
        level : int, str, default None
            If axis is a MultiIndex, localize a specific level. Otherwise
            must be None.
        copy : bool, default True
            Also make a copy of the underlying data.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            When clocks moved backward due to DST, ambiguous times may arise.
            For example in Central European Time (UTC+01), when going from
            03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
            00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
            `ambiguous` parameter dictates how ambiguous times should be
            handled.

            - 'infer' will attempt to infer fall dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False designates
              a non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous
              times.
        nonexistent : str, default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST. Valid values are:

            - 'shift_forward' will shift the nonexistent time forward to the
              closest existing time
            - 'shift_backward' will shift the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise a NonExistentTimeError if there are
              nonexistent times.

        Returns
        -------
        {klass}
            Same type as the input.

        Raises
        ------
        TypeError
            If the TimeSeries is tz-aware and tz is not None.

        Examples
        --------
        Localize local times:

        >>> s = pd.Series(
        ...     [1],
        ...     index=pd.DatetimeIndex(['2018-09-15 01:30:00']),
        ... )
        >>> s.tz_localize('CET')
        2018-09-15 01:30:00+02:00    1
        dtype: int64

        Pass None to convert to tz-naive index and preserve local time:

        >>> s = pd.Series([1],
        ...               index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
        >>> s.tz_localize(None)
        2018-09-15 01:30:00    1
        dtype: int64

        Be careful with DST changes.
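        In a "fall back" transition the same wall-clock time occurs twice, so
        a naive timestamp in that window cannot be localized unambiguously on
        its own.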
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. 
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
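
        Notes
        -----
        Apart from the optional NA filling controlled by `fill_method`, the
        result is computed as ``data / data.shift(periods=periods, freq=freq) - 1``.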
Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> DataFrame | Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, **kwargs ) def _accum_func( self, name: str, func, axis: 
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. 
""" _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." 
) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. 
if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). 
This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. """ arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. 
Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. 
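Examples
--------
For a Series this is always a one-element list holding the index (a minimal
illustration, assuming the default ``RangeIndex``):

>>> s = pd.Series([1, 2, 3])
>>> s.axes
[RangeIndex(start=0, stop=3, step=1)]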
""" return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. 
self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. 
test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
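For example, missing values are reported as ``-1`` while the remaining positions give the sort order of the non-missing values:

>>> pd.Series([3.0, np.nan, 1.0]).argsort()
0    1
1   -1
2    0
dtype: int64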
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series class SeriesGroupBy(GroupBy[Series]): def _wrap_agged_manager(self, mgr: Manager) -> Series: return self.obj._constructor(mgr, name=self.obj.name) def _get_data_to_aggregate( self, *, numeric_only: bool = False, name: str | None = None ) -> SingleManager: ser = self._selected_obj single = ser._mgr if numeric_only and not is_numeric_dtype(ser.dtype): # GH#41291 match Series behavior kwd_name = "numeric_only" raise TypeError( f"Cannot use {kwd_name}=True with " f"{type(self).__name__}.{name} and non-numeric dtypes." 
) return single def _iterate_slices(self) -> Iterable[Series]: yield self._selected_obj _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.groupby([1, 1, 2, 2]).min() 1 1 2 3 dtype: int64 >>> s.groupby([1, 1, 2, 2]).agg('min') 1 1 2 3 dtype: int64 >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max']) min max 1 1 2 2 3 4 The output column names can be controlled by passing the desired column names and aggregations as keyword arguments. >>> s.groupby([1, 1, 2, 2]).agg( ... minimum='min', ... maximum='max', ... ) minimum maximum 1 1 2 2 3 4 .. versionchanged:: 1.3.0 The resulting dtype will reflect the return value of the aggregating function. >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min()) 1 1.0 2 3.0 dtype: float64 """ ) _apply_docs["template"].format( input="series", examples=_apply_docs["series_examples"] ) ) def apply(self, func, *args, **kwargs) -> Series: return super().apply(func, *args, **kwargs) def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): return self._aggregate_with_numba( func, *args, engine_kwargs=engine_kwargs, **kwargs ) relabeling = func is None columns = None if relabeling: columns, func = validate_func_kwargs(kwargs) kwargs = {} if isinstance(func, str): return getattr(self, func)(*args, **kwargs) elif isinstance(func, abc.Iterable): # Catch instances of lists / tuples # but not the class list / tuple itself. func = maybe_mangle_lambdas(func) ret = self._aggregate_multiple_funcs(func, *args, **kwargs) if relabeling: # columns is not narrowed by mypy from relabeling flag assert columns is not None # for mypy ret.columns = columns if not self.as_index: ret = self._insert_inaxis_grouper(ret) ret.index = default_index(len(ret)) return ret else: cyfunc = com.get_cython_func(func) if cyfunc and not args and not kwargs: return getattr(self, cyfunc)() if self.ngroups == 0: # e.g. test_evaluate_with_empty_groups without any groups to # iterate over, we have no output on which to do dtype # inference. We default to using the existing dtype. 
# xref GH#51445 obj = self._obj_with_exclusions return self.obj._constructor( [], name=self.obj.name, index=self.grouper.result_index, dtype=obj.dtype, ) if self.grouper.nkeys > 1: return self._python_agg_general(func, *args, **kwargs) try: return self._python_agg_general(func, *args, **kwargs) except KeyError: # KeyError raised in test_groupby.test_basic is bc the func does # a dictionary lookup on group.name, but group name is not # pinned in _python_agg_general, only in _aggregate_named result = self._aggregate_named(func, *args, **kwargs) # result is a dict whose keys are the elements of result_index result = Series(result, index=self.grouper.result_index) result = self._wrap_aggregated_output(result) return result agg = aggregate def _python_agg_general(self, func, *args, **kwargs): func = com.is_builtin_func(func) f = lambda x: func(x, *args, **kwargs) obj = self._obj_with_exclusions result = self.grouper.agg_series(obj, f) res = obj._constructor(result, name=obj.name) return self._wrap_aggregated_output(res) def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame: if isinstance(arg, dict): if self.as_index: # GH 15931 raise SpecificationError("nested renamer is not supported") else: # GH#50684 - This accidentally worked in 1.x arg = list(arg.items()) elif any(isinstance(x, (tuple, list)) for x in arg): arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] else: # list of functions / function names columns = [] for f in arg: columns.append(com.get_callable_name(f) or f) arg = zip(columns, arg) results: dict[base.OutputKey, DataFrame | Series] = {} with com.temp_setattr(self, "as_index", True): # Combine results using the index, need to adjust index after # if as_index=False (GH#50724) for idx, (name, func) in enumerate(arg): key = base.OutputKey(label=name, position=idx) results[key] = self.aggregate(func, *args, **kwargs) if any(isinstance(x, DataFrame) for x in results.values()): from pandas import concat res_df = concat( results.values(), axis=1, keys=[key.label for key in results] ) return res_df indexed_output = {key.position: val for key, val in results.items()} output = self.obj._constructor_expanddim(indexed_output, index=None) output.columns = Index(key.label for key in results) output = self._reindex_output(output) return output def _wrap_applied_output( self, data: Series, values: list[Any], not_indexed_same: bool = False, is_transform: bool = False, ) -> DataFrame | Series: """ Wrap the output of SeriesGroupBy.apply into the expected result. Parameters ---------- data : Series Input data for groupby operation. values : List[Any] Applied output for each group. not_indexed_same : bool, default False Whether the applied outputs are not indexed the same as the group axes. 
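is_transform : bool, default False
    Whether the applied outputs come from a transform; when ``values`` is
    empty this determines whether the result uses ``data.index`` (transform)
    or the grouper's result index.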
Returns ------- DataFrame or Series """ if len(values) == 0: # GH #6265 if is_transform: # GH#47787 see test_group_on_empty_multiindex res_index = data.index else: res_index = self.grouper.result_index return self.obj._constructor( [], name=self.obj.name, index=res_index, dtype=data.dtype, ) assert values is not None if isinstance(values[0], dict): # GH #823 #24880 index = self.grouper.result_index res_df = self.obj._constructor_expanddim(values, index=index) res_df = self._reindex_output(res_df) # if self.observed is False, # keep all-NaN rows created while re-indexing res_ser = res_df.stack(dropna=self.observed) res_ser.name = self.obj.name return res_ser elif isinstance(values[0], (Series, DataFrame)): result = self._concat_objects( values, not_indexed_same=not_indexed_same, is_transform=is_transform, ) if isinstance(result, Series): result.name = self.obj.name if not self.as_index and not_indexed_same: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result else: # GH #6265 #24880 result = self.obj._constructor( data=values, index=self.grouper.result_index, name=self.obj.name ) if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return self._reindex_output(result) def _aggregate_named(self, func, *args, **kwargs): # Note: this is very similar to _aggregate_series_pure_python, # but that does not pin group.name result = {} initialized = False for name, group in self: object.__setattr__(group, "name", name) output = func(group, *args, **kwargs) output = libreduction.extract_result(output) if not initialized: # We only do this validation on the first iteration libreduction.check_result_array(output, group.dtype) initialized = True result[name] = output return result __examples_series_doc = dedent( """ >>> ser = pd.Series( ... [390.0, 350.0, 30.0, 20.0], ... index=["Falcon", "Falcon", "Parrot", "Parrot"], ... name="Max Speed") >>> grouped = ser.groupby([1, 1, 2, 2]) >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) Falcon 0.707107 Falcon -0.707107 Parrot 0.707107 Parrot -0.707107 Name: Max Speed, dtype: float64 Broadcast result of the transformation >>> grouped.transform(lambda x: x.max() - x.min()) Falcon 40.0 Falcon 40.0 Parrot 10.0 Parrot 10.0 Name: Max Speed, dtype: float64 >>> grouped.transform("mean") Falcon 370.0 Falcon 370.0 Parrot 25.0 Parrot 25.0 Name: Max Speed, dtype: float64 .. versionchanged:: 1.3.0 The resulting dtype will reflect the return value of the passed ``func``, for example: >>> grouped.transform(lambda x: x.astype(int).max()) Falcon 390 Falcon 390 Parrot 30 Parrot 30 Name: Max Speed, dtype: int64 """ ) def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): return self._transform( func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs ) def _cython_transform( self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs ): assert axis == 0 # handled by caller obj = self._selected_obj try: result = self.grouper._cython_operation( "transform", obj._values, how, axis, **kwargs ) except NotImplementedError as err: # e.g. test_groupby_raises_string raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err return obj._constructor(result, index=self.obj.index, name=obj.name) def _transform_general(self, func: Callable, *args, **kwargs) -> Series: """ Transform with a callable func`. 
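Applies ``func`` to each group, wraps each result with the group's index, concatenates the pieces, and reorders them to match the original index order.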
""" assert callable(func) klass = type(self.obj) results = [] for name, group in self.grouper.get_iterator( self._selected_obj, axis=self.axis ): # this setattr is needed for test_transform_lambda_with_datetimetz object.__setattr__(group, "name", name) res = func(group, *args, **kwargs) results.append(klass(res, index=group.index)) # check for empty "results" to avoid concat ValueError if results: from pandas.core.reshape.concat import concat concatenated = concat(results) result = self._set_result_index_ordered(concatenated) else: result = self.obj._constructor(dtype=np.float64) result.name = self.obj.name return result def filter(self, func, dropna: bool = True, *args, **kwargs): """ Filter elements from groups that don't satisfy a criterion. Elements from groups are filtered if they do not satisfy the boolean criterion specified by func. Parameters ---------- func : function Criterion to apply to each group. Should return True or False. dropna : bool Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- Series Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 1 2 3 4 5 6 Name: B, dtype: int64 """ if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) # Interpret np.nan as False. def true_and_notna(x) -> bool: b = wrapper(x) return notna(b) and b try: indices = [ self._get_index(name) for name, group in self if true_and_notna(group) ] except (ValueError, TypeError) as err: raise TypeError("the filter must return a boolean result") from err filtered = self._apply_filter(indices, dropna) return filtered def nunique(self, dropna: bool = True) -> Series | DataFrame: """ Return number of unique elements in the group. Returns ------- Series Number of unique values within each group. 
""" ids, _, _ = self.grouper.group_info val = self.obj._values codes, _ = algorithms.factorize(val, sort=False) sorter = np.lexsort((codes, ids)) codes = codes[sorter] ids = ids[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] inc = np.r_[1, codes[1:] != codes[:-1]] # 1st item of each group is a new unique observation mask = codes == -1 if dropna: inc[idx] = 1 inc[mask] = 0 else: inc[mask & np.r_[False, mask[:-1]]] = 0 inc[idx] = 1 out = np.add.reduceat(inc, idx).astype("int64", copy=False) if len(ids): # NaN/NaT group exists if the head of ids is -1, # so remove it from res and exclude its index from idx if ids[0] == -1: res = out[1:] idx = idx[np.flatnonzero(idx)] else: res = out else: res = out[1:] ri = self.grouper.result_index # we might have duplications among the bins if len(res) != len(ri): res, out = np.zeros(len(ri), dtype=out.dtype), res if len(ids) > 0: # GH#21334s res[ids[idx]] = out result: Series | DataFrame = self.obj._constructor( res, index=ri, name=self.obj.name ) if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return self._reindex_output(result, fill_value=0) def describe(self, **kwargs): return super().describe(**kwargs) def value_counts( self, normalize: bool = False, sort: bool = True, ascending: bool = False, bins=None, dropna: bool = True, ) -> Series | DataFrame: name = "proportion" if normalize else "count" if bins is None: result = self._value_counts( normalize=normalize, sort=sort, ascending=ascending, dropna=dropna ) result.name = name return result from pandas.core.reshape.merge import get_join_indexers from pandas.core.reshape.tile import cut ids, _, _ = self.grouper.group_info val = self.obj._values index_names = self.grouper.names + [self.obj.name] if is_categorical_dtype(val.dtype) or ( bins is not None and not np.iterable(bins) ): # scalar bins cannot be done at top level # in a backward compatible way # GH38672 relates to categorical dtype ser = self.apply( Series.value_counts, normalize=normalize, sort=sort, ascending=ascending, bins=bins, ) ser.name = name ser.index.names = index_names return ser # groupby removes null keys from groupings mask = ids != -1 ids, val = ids[mask], val[mask] if bins is None: lab, lev = algorithms.factorize(val, sort=True) llab = lambda lab, inc: lab[inc] else: # lab is a Categorical with categories an IntervalIndex cat_ser = cut(Series(val, copy=False), bins, include_lowest=True) cat_obj = cast("Categorical", cat_ser._values) lev = cat_obj.categories lab = lev.take( cat_obj.codes, allow_fill=True, fill_value=lev._na_value, ) llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] if is_interval_dtype(lab.dtype): # TODO: should we do this inside II? lab_interval = cast(Interval, lab) sorter = np.lexsort((lab_interval.left, lab_interval.right, ids)) else: sorter = np.lexsort((lab, ids)) ids, lab = ids[sorter], lab[sorter] # group boundaries are where group ids change idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0] idx = np.r_[0, idchanges] if not len(ids): idx = idchanges # new values are where sorted labels change lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) inc = np.r_[True, lchanges] if not len(val): inc = lchanges inc[idx] = True # group boundaries are also new values out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts # num. 
of times each group should be repeated rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) # multi-index components codes = self.grouper.reconstructed_codes codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] levels = [ping.group_index for ping in self.grouper.groupings] + [lev] if dropna: mask = codes[-1] != -1 if mask.all(): dropna = False else: out, codes = out[mask], [level_codes[mask] for level_codes in codes] if normalize: out = out.astype("float") d = np.diff(np.r_[idx, len(ids)]) if dropna: m = ids[lab == -1] np.add.at(d, m, -1) acc = rep(d)[mask] else: acc = rep(d) out /= acc if sort and bins is None: cat = ids[inc][mask] if dropna else ids[inc] sorter = np.lexsort((out if ascending else -out, cat)) out, codes[-1] = out[sorter], codes[-1][sorter] if bins is not None: # for compat. with libgroupby.value_counts need to ensure every # bin is present at every index level, null filled with zeros diff = np.zeros(len(out), dtype="bool") for level_codes in codes[:-1]: diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] ncat, nbin = diff.sum(), len(levels[-1]) left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] right = [diff.cumsum() - 1, codes[-1]] _, idx = get_join_indexers(left, right, sort=False, how="left") out = np.where(idx != -1, out[idx], 0) if sort: sorter = np.lexsort((out if ascending else -out, left[0])) out, left[-1] = out[sorter], left[-1][sorter] # build the multi-index w/ full levels def build_codes(lev_codes: np.ndarray) -> np.ndarray: return np.repeat(lev_codes[diff], nbin) codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] codes.append(left[-1]) mi = MultiIndex( levels=levels, codes=codes, names=index_names, verify_integrity=False ) if is_integer_dtype(out.dtype): out = ensure_int64(out) result = self.obj._constructor(out, index=mi, name=name) if not self.as_index: result = result.reset_index() return result def fillna( self, value: object | ArrayLike | None = None, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool = False, limit: int | None = None, downcast: dict | None = None, ) -> Series | None: """ Fill NA/NaN values using the specified method within groups. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. Users wanting to use the ``value`` argument and not ``method`` should prefer :meth:`.Series.fillna` as this will produce the same result and be more performant. method : {{'bfill', 'ffill', None}}, default None Method to use for filling holes. ``'ffill'`` will propagate the last valid observation forward within a group. ``'bfill'`` will use next valid observation to fill the gap. axis : {0 or 'index', 1 or 'columns'} Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`. inplace : bool, default False Broken. Do not set to True. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill within a group. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Series Object with missing values filled within groups. See Also -------- ffill : Forward fill values within a group. bfill : Backward fill values within a group. Examples -------- >>> ser = pd.Series([np.nan, np.nan, 2, 3, np.nan, np.nan]) >>> ser 0 NaN 1 NaN 2 2.0 3 3.0 4 NaN 5 NaN dtype: float64 Propagate non-null values forward or backward within each group. >>> ser.groupby([0, 0, 0, 1, 1, 1]).fillna(method="ffill") 0 NaN 1 NaN 2 2.0 3 3.0 4 3.0 5 3.0 dtype: float64 >>> ser.groupby([0, 0, 0, 1, 1, 1]).fillna(method="bfill") 0 2.0 1 2.0 2 2.0 3 3.0 4 NaN 5 NaN dtype: float64 Only replace the first NaN element within a group. >>> ser.groupby([0, 0, 0, 1, 1, 1]).fillna(method="ffill", limit=1) 0 NaN 1 NaN 2 2.0 3 3.0 4 3.0 5 NaN dtype: float64 """ result = self._op_via_apply( "fillna", value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, ) return result def take( self, indices: TakeIndexer, axis: Axis = 0, **kwargs, ) -> Series: """ Return the elements in the given *positional* indices in each group. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. If a requested index does not exist for some group, this method will raise. To get similar behavior that ignores indices that don't exist, see :meth:`.SeriesGroupBy.nth`. Parameters ---------- indices : array-like An array of ints indicating which positions to take in each group. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `SeriesGroupBy` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- Series A Series containing the elements taken from each group. See Also -------- Series.take : Take elements from a Series along an axis. Series.loc : Select a subset of a DataFrame by labels. Series.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan), ... ('rabbit', 'mammal', 15.0)], ... columns=['name', 'class', 'max_speed'], ... index=[4, 3, 2, 1, 0]) >>> df name class max_speed 4 falcon bird 389.0 3 parrot bird 24.0 2 lion mammal 80.5 1 monkey mammal NaN 0 rabbit mammal 15.0 >>> gb = df["name"].groupby([1, 1, 2, 2, 2]) Take elements at positions 0 and 1 along the axis 0 in each group (default). >>> gb.take([0, 1]) 1 4 falcon 3 parrot 2 2 lion 1 monkey Name: name, dtype: object We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> gb.take([-1, -2]) 1 3 parrot 4 falcon 2 0 rabbit 1 monkey Name: name, dtype: object """ result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) return result def skew( self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True, numeric_only: bool = False, **kwargs, ) -> Series: """ Return unbiased skew within groups. Normalized by N-1. 
Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default 0 Axis for the function to be applied on. This parameter is only for compatibility with DataFrame and is unused. skipna : bool, default True Exclude NA/null values when computing the result. numeric_only : bool, default False Include only float, int, boolean columns. Not implemented for Series. **kwargs Additional keyword arguments to be passed to the function. Returns ------- Series See Also -------- Series.skew : Return unbiased skew over requested axis. Examples -------- >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.], ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon', ... 'Parrot', 'Parrot', 'Parrot'], ... name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Falcon 357.0 Falcon NaN Parrot 22.0 Parrot 20.0 Parrot 30.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).skew() Falcon 1.525174 Parrot 1.457863 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).skew(skipna=False) Falcon NaN Parrot 1.457863 Name: Max Speed, dtype: float64 """ result = self._op_via_apply( "skew", axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs, ) return result def plot(self): result = GroupByPlot(self) return result def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: f = partial(Series.nlargest, n=n, keep=keep) data = self._selected_obj # Don't change behavior if result index happens to be the same, i.e. # already ordered and n >= all group sizes. result = self._python_apply_general(f, data, not_indexed_same=True) return result def nsmallest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: f = partial(Series.nsmallest, n=n, keep=keep) data = self._selected_obj # Don't change behavior if result index happens to be the same, i.e. # already ordered and n >= all group sizes. 
result = self._python_apply_general(f, data, not_indexed_same=True) return result def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series: result = self._op_via_apply("idxmin", axis=axis, skipna=skipna) return result def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series: result = self._op_via_apply("idxmax", axis=axis, skipna=skipna) return result def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> Series: result = self._op_via_apply( "corr", other=other, method=method, min_periods=min_periods ) return result def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1 ) -> Series: result = self._op_via_apply( "cov", other=other, min_periods=min_periods, ddof=ddof ) return result def is_monotonic_increasing(self) -> Series: return self.apply(lambda ser: ser.is_monotonic_increasing) def is_monotonic_decreasing(self) -> Series: return self.apply(lambda ser: ser.is_monotonic_decreasing) def hist( self, by=None, ax=None, grid: bool = True, xlabelsize: int | None = None, xrot: float | None = None, ylabelsize: int | None = None, yrot: float | None = None, figsize: tuple[int, int] | None = None, bins: int | Sequence[int] = 10, backend: str | None = None, legend: bool = False, **kwargs, ): result = self._op_via_apply( "hist", by=by, ax=ax, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, figsize=figsize, bins=bins, backend=backend, legend=legend, **kwargs, ) return result def dtype(self) -> Series: return self.apply(lambda ser: ser.dtype) def unique(self) -> Series: result = self._op_via_apply("unique") return result class DataFrameGroupBy(GroupBy[DataFrame]): _agg_examples_doc = dedent( """ Examples -------- >>> df = pd.DataFrame( ... { ... "A": [1, 1, 2, 2], ... "B": [1, 2, 3, 4], ... "C": [0.362838, 0.227877, 1.267767, -0.562860], ... } ... ) >>> df A B C 0 1 1 0.362838 1 1 2 0.227877 2 2 3 1.267767 3 2 4 -0.562860 The aggregation is for each column. >>> df.groupby('A').agg('min') B C A 1 1 0.227877 2 3 -0.562860 Multiple aggregations >>> df.groupby('A').agg(['min', 'max']) B C min max min max A 1 1 2 0.227877 0.362838 2 3 4 -0.562860 1.267767 Select a column for aggregation >>> df.groupby('A').B.agg(['min', 'max']) min max A 1 1 2 2 3 4 User-defined function for aggregation >>> df.groupby('A').agg(lambda x: sum(x) + 2) B C A 1 5 2.590715 2 9 2.704907 Different aggregations per column >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'}) B C min max sum A 1 1 2 0.590715 2 3 4 0.704907 To control the output names with different aggregations per column, pandas supports "named aggregation" >>> df.groupby("A").agg( ... b_min=pd.NamedAgg(column="B", aggfunc="min"), ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")) b_min c_sum A 1 1 0.590715 2 3 0.704907 - The keywords are the *output* column names - The values are tuples whose first element is the column to select and the second element is the aggregation to apply to that column. Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields ``['column', 'aggfunc']`` to make it clearer what the arguments are. As usual, the aggregation can be a callable or a string alias. See :ref:`groupby.aggregate.named` for more. .. versionchanged:: 1.3.0 The resulting dtype will reflect the return value of the aggregating function. 
>>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min()) B A 1 1.0 2 3.0 """ ) def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): return self._aggregate_with_numba( func, *args, engine_kwargs=engine_kwargs, **kwargs ) relabeling, func, columns, order = reconstruct_func(func, **kwargs) func = maybe_mangle_lambdas(func) op = GroupByApply(self, func, args, kwargs) result = op.agg() if not is_dict_like(func) and result is not None: return result elif relabeling: # this should be the only (non-raising) case with relabeling # used reordered index of columns result = cast(DataFrame, result) result = result.iloc[:, order] result = cast(DataFrame, result) # error: Incompatible types in assignment (expression has type # "Optional[List[str]]", variable has type # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]], # Index, Series], Sequence[Any]]") result.columns = columns # type: ignore[assignment] if result is None: # grouper specific aggregations if self.grouper.nkeys > 1: # test_groupby_as_index_series_scalar gets here with 'not self.as_index' return self._python_agg_general(func, *args, **kwargs) elif args or kwargs: # test_pass_args_kwargs gets here (with and without as_index) # can't return early result = self._aggregate_frame(func, *args, **kwargs) elif self.axis == 1: # _aggregate_multiple_funcs does not allow self.axis == 1 # Note: axis == 1 precludes 'not self.as_index', see __init__ result = self._aggregate_frame(func) return result else: # try to treat as if we are passing a list gba = GroupByApply(self, [func], args=(), kwargs={}) try: result = gba.agg() except ValueError as err: if "No objects to concatenate" not in str(err): raise # _aggregate_frame can fail with e.g. func=Series.mode, # where it expects 1D values but would be getting 2D values # In other tests, using aggregate_frame instead of GroupByApply # would give correct values but incorrect dtypes # object vs float64 in test_cython_agg_empty_buckets # float64 vs int64 in test_category_order_apply result = self._aggregate_frame(func) else: # GH#32040, GH#35246 # e.g. test_groupby_as_index_select_column_sum_empty_df result = cast(DataFrame, result) result.columns = self._obj_with_exclusions.columns.copy() if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result agg = aggregate def _python_agg_general(self, func, *args, **kwargs): func = com.is_builtin_func(func) f = lambda x: func(x, *args, **kwargs) # iterate through "columns" ex exclusions to populate output dict output: dict[base.OutputKey, ArrayLike] = {} if self.ngroups == 0: # e.g. test_evaluate_with_empty_groups different path gets different # result dtype in empty case. return self._python_apply_general(f, self._selected_obj, is_agg=True) for idx, obj in enumerate(self._iterate_slices()): name = obj.name result = self.grouper.agg_series(obj, f) key = base.OutputKey(label=name, position=idx) output[key] = result if not output: # e.g. 
test_margins_no_values_no_cols return self._python_apply_general(f, self._selected_obj) res = self._indexed_output_to_ndframe(output) return self._wrap_aggregated_output(res) def _iterate_slices(self) -> Iterable[Series]: obj = self._selected_obj if self.axis == 1: obj = obj.T if isinstance(obj, Series) and obj.name not in self.exclusions: # Occurs when doing DataFrameGroupBy(...)["X"] yield obj else: for label, values in obj.items(): if label in self.exclusions: # Note: if we tried to just iterate over _obj_with_exclusions, # we would break test_wrap_agg_out by yielding a column # that is skipped here but not dropped from obj_with_exclusions continue yield values def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: if self.grouper.nkeys != 1: raise AssertionError("Number of keys must be 1") obj = self._obj_with_exclusions result: dict[Hashable, NDFrame | np.ndarray] = {} for name, grp_df in self.grouper.get_iterator(obj, self.axis): fres = func(grp_df, *args, **kwargs) result[name] = fres result_index = self.grouper.result_index other_ax = obj.axes[1 - self.axis] out = self.obj._constructor(result, index=other_ax, columns=result_index) if self.axis == 0: out = out.T return out def _wrap_applied_output( self, data: DataFrame, values: list, not_indexed_same: bool = False, is_transform: bool = False, ): if len(values) == 0: if is_transform: # GH#47787 see test_group_on_empty_multiindex res_index = data.index else: res_index = self.grouper.result_index result = self.obj._constructor(index=res_index, columns=data.columns) result = result.astype(data.dtypes, copy=False) return result # GH12824 # using values[0] here breaks test_groupby_apply_none_first first_not_none = next(com.not_none(*values), None) if first_not_none is None: # GH9684 - All values are None, return an empty frame. return self.obj._constructor() elif isinstance(first_not_none, DataFrame): return self._concat_objects( values, not_indexed_same=not_indexed_same, is_transform=is_transform, ) key_index = self.grouper.result_index if self.as_index else None if isinstance(first_not_none, (np.ndarray, Index)): # GH#1738: values is list of arrays of unequal lengths # fall through to the outer else clause # TODO: sure this is right? 
we used to do this # after raising AttributeError above return self.obj._constructor_sliced( values, index=key_index, name=self._selection ) elif not isinstance(first_not_none, Series): # values are not series or array-like but scalars # self._selection not passed through to Series as the # result should not take the name of original selection # of columns if self.as_index: return self.obj._constructor_sliced(values, index=key_index) else: result = self.obj._constructor(values, columns=[self._selection]) result = self._insert_inaxis_grouper(result) return result else: # values are Series return self._wrap_applied_output_series( values, not_indexed_same, first_not_none, key_index, is_transform, ) def _wrap_applied_output_series( self, values: list[Series], not_indexed_same: bool, first_not_none, key_index: Index | None, is_transform: bool, ) -> DataFrame | Series: kwargs = first_not_none._construct_axes_dict() backup = Series(**kwargs) values = [x if (x is not None) else backup for x in values] all_indexed_same = all_indexes_same(x.index for x in values) if not all_indexed_same: # GH 8467 return self._concat_objects( values, not_indexed_same=True, is_transform=is_transform, ) # Combine values # vstack+constructor is faster than concat and handles MI-columns stacked_values = np.vstack([np.asarray(v) for v in values]) if self.axis == 0: index = key_index columns = first_not_none.index.copy() if columns.name is None: # GH6124 - propagate name of Series when it's consistent names = {v.name for v in values} if len(names) == 1: columns.name = list(names)[0] else: index = first_not_none.index columns = key_index stacked_values = stacked_values.T if stacked_values.dtype == object: # We'll have the DataFrame constructor do inference stacked_values = stacked_values.tolist() result = self.obj._constructor(stacked_values, index=index, columns=columns) if not self.as_index: result = self._insert_inaxis_grouper(result) return self._reindex_output(result) def _cython_transform( self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs, ) -> DataFrame: assert axis == 0 # handled by caller # With self.axis == 0, we have multi-block tests # e.g. test_rank_min_int, test_cython_transform_frame # test_transform_numeric_ret # With self.axis == 1, _get_data_to_aggregate does a transpose # so we always have a single block. mgr: Manager2D = self._get_data_to_aggregate( numeric_only=numeric_only, name=how ) def arr_func(bvalues: ArrayLike) -> ArrayLike: return self.grouper._cython_operation( "transform", bvalues, how, 1, **kwargs ) # We could use `mgr.apply` here and not have to set_axis, but # we would have to do shape gymnastics for ArrayManager compat res_mgr = mgr.grouped_reduce(arr_func) res_mgr.set_axis(1, mgr.axes[1]) res_df = self.obj._constructor(res_mgr) res_df = self._maybe_transpose_result(res_df) return res_df def _transform_general(self, func, *args, **kwargs): from pandas.core.reshape.concat import concat applied = [] obj = self._obj_with_exclusions gen = self.grouper.get_iterator(obj, axis=self.axis) fast_path, slow_path = self._define_paths(func, *args, **kwargs) # Determine whether to use slow or fast path by evaluating on the first group. # Need to handle the case of an empty generator and process the result so that # it does not need to be computed again. try: name, group = next(gen) except StopIteration: pass else: object.__setattr__(group, "name", name) try: path, res = self._choose_path(fast_path, slow_path, group) except ValueError as err: # e.g. 
test_transform_with_non_scalar_group msg = "transform must return a scalar value for each group" raise ValueError(msg) from err if group.size > 0: res = _wrap_transform_general_frame(self.obj, group, res) applied.append(res) # Compute and process with the remaining groups for name, group in gen: if group.size == 0: continue object.__setattr__(group, "name", name) res = path(group) res = _wrap_transform_general_frame(self.obj, group, res) applied.append(res) concat_index = obj.columns if self.axis == 0 else obj.index other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1 concatenated = concat(applied, axis=self.axis, verify_integrity=False) concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False) return self._set_result_index_ordered(concatenated) __examples_dataframe_doc = dedent( """ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : ['one', 'one', 'two', 'three', ... 'two', 'two'], ... 'C' : [1, 5, 5, 2, 5, 5], ... 'D' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A')[['C', 'D']] >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) C D 0 -1.154701 -0.577350 1 0.577350 0.000000 2 0.577350 1.154701 3 -1.154701 -1.000000 4 0.577350 -0.577350 5 0.577350 1.000000 Broadcast result of the transformation >>> grouped.transform(lambda x: x.max() - x.min()) C D 0 4.0 6.0 1 3.0 8.0 2 4.0 6.0 3 3.0 8.0 4 4.0 6.0 5 3.0 8.0 >>> grouped.transform("mean") C D 0 3.666667 4.0 1 4.000000 5.0 2 3.666667 4.0 3 4.000000 5.0 4 3.666667 4.0 5 4.000000 5.0 .. versionchanged:: 1.3.0 The resulting dtype will reflect the return value of the passed ``func``, for example: >>> grouped.transform(lambda x: x.astype(int).max()) C D 0 5 8 1 5 9 2 5 8 3 5 9 4 5 8 5 5 9 """ ) def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): return self._transform( func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs ) def _define_paths(self, func, *args, **kwargs): if isinstance(func, str): fast_path = lambda group: getattr(group, func)(*args, **kwargs) slow_path = lambda group: group.apply( lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis ) else: fast_path = lambda group: func(group, *args, **kwargs) slow_path = lambda group: group.apply( lambda x: func(x, *args, **kwargs), axis=self.axis ) return fast_path, slow_path def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame): path = slow_path res = slow_path(group) if self.ngroups == 1: # no need to evaluate multiple paths when only # a single group exists return path, res # if we make it here, test if we can use the fast path try: res_fast = fast_path(group) except AssertionError: raise # pragma: no cover except Exception: # GH#29631 For user-defined function, we can't predict what may be # raised; see test_transform.test_transform_fastpath_raises return path, res # verify fast path returns either: # a DataFrame with columns equal to group.columns # OR a Series with index equal to group.columns if isinstance(res_fast, DataFrame): if not res_fast.columns.equals(group.columns): return path, res elif isinstance(res_fast, Series): if not res_fast.index.equals(group.columns): return path, res else: return path, res if res_fast.equals(res): path = fast_path return path, res def filter(self, func, dropna: bool = True, *args, **kwargs): """ Filter elements from groups that don't satisfy a criterion. Elements from groups are filtered if they do not satisfy the boolean criterion specified by func. 
Parameters ---------- func : function Criterion to apply to each group. Should return True or False. dropna : bool Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- DataFrame Notes ----- Each subframe is endowed the attribute 'name' in case you need to know which group you are working on. Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> grouped.filter(lambda x: x['B'].mean() > 3.) A B C 1 bar 2 5.0 3 bar 4 1.0 5 bar 6 9.0 """ indices = [] obj = self._selected_obj gen = self.grouper.get_iterator(obj, axis=self.axis) for name, group in gen: object.__setattr__(group, "name", name) res = func(group, *args, **kwargs) try: res = res.squeeze() except AttributeError: # allow e.g., scalars and frames to pass pass # interpret the result of the filter if is_bool(res) or (is_scalar(res) and isna(res)): if notna(res) and res: indices.append(self._get_index(name)) else: # non scalars aren't allowed raise TypeError( f"filter function returned a {type(res).__name__}, " "but expected a scalar bool" ) return self._apply_filter(indices, dropna) def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: if self.axis == 1: # GH 37725 raise ValueError("Cannot subset columns when using axis=1") # per GH 23566 if isinstance(key, tuple) and len(key) > 1: # if len == 1, then it becomes a SeriesGroupBy and this is actually # valid syntax, so don't raise raise ValueError( "Cannot subset columns with a tuple with more than one element. " "Use a list instead." ) return super().__getitem__(key) def _gotitem(self, key, ndim: int, subset=None): """ sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on """ if ndim == 2: if subset is None: subset = self.obj return DataFrameGroupBy( subset, self.grouper, axis=self.axis, level=self.level, grouper=self.grouper, exclusions=self.exclusions, selection=key, as_index=self.as_index, sort=self.sort, group_keys=self.group_keys, observed=self.observed, dropna=self.dropna, ) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy( subset, level=self.level, grouper=self.grouper, exclusions=self.exclusions, selection=key, as_index=self.as_index, sort=self.sort, group_keys=self.group_keys, observed=self.observed, dropna=self.dropna, ) raise AssertionError("invalid ndim for _gotitem") def _get_data_to_aggregate( self, *, numeric_only: bool = False, name: str | None = None ) -> Manager2D: obj = self._obj_with_exclusions if self.axis == 1: mgr = obj.T._mgr else: mgr = obj._mgr if numeric_only: mgr = mgr.get_numeric_data(copy=False) return mgr def _indexed_output_to_ndframe( self, output: Mapping[base.OutputKey, ArrayLike] ) -> DataFrame: """ Wrap the dict result of a GroupBy aggregation into a DataFrame. 
""" indexed_output = {key.position: val for key, val in output.items()} columns = Index([key.label for key in output]) columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names) result = self.obj._constructor(indexed_output) result.columns = columns return result def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: return self.obj._constructor(mgr) def _iterate_column_groupbys(self, obj: DataFrame): for i, colname in enumerate(obj.columns): yield colname, SeriesGroupBy( obj.iloc[:, i], selection=colname, grouper=self.grouper, exclusions=self.exclusions, observed=self.observed, ) def _apply_to_column_groupbys(self, func, obj: DataFrame) -> DataFrame: from pandas.core.reshape.concat import concat columns = obj.columns results = [ func(col_groupby) for _, col_groupby in self._iterate_column_groupbys(obj) ] if not len(results): # concat would raise return DataFrame([], columns=columns, index=self.grouper.result_index) else: return concat(results, keys=columns, axis=1) def nunique(self, dropna: bool = True) -> DataFrame: """ Return DataFrame with counts of unique elements in each position. Parameters ---------- dropna : bool, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() value1 value2 id egg 1 1 ham 1 2 spam 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y """ if self.axis != 0: # see test_groupby_crash_on_nunique return self._python_apply_general( lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True ) obj = self._obj_with_exclusions results = self._apply_to_column_groupbys( lambda sgb: sgb.nunique(dropna), obj=obj ) if not self.as_index: results.index = default_index(len(results)) results = self._insert_inaxis_grouper(results) return results def idxmax( self, axis: Axis | None = None, skipna: bool = True, numeric_only: bool = False, ) -> DataFrame: """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {{0 or 'index', 1 or 'columns'}}, default None The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. If axis is not provided, grouper's axis is used. .. versionchanged:: 2.0.0 skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionadded:: 1.5.0 Returns ------- Series Indexes of maxima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmax : Return index of the maximum element. Notes ----- This method is the DataFrame version of ``ndarray.argmax``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], ... 'co2_emissions': [37.2, 19.66, 1712]}, ... index=['Pork', 'Wheat Products', 'Beef']) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the maximum value in each column. 
>>> df.idxmax() consumption Wheat Products co2_emissions Beef dtype: object To return the index for the maximum value in each row, use ``axis="columns"``. >>> df.idxmax(axis="columns") Pork co2_emissions Wheat Products consumption Beef co2_emissions dtype: object """ if axis is None: axis = self.axis def func(df): return df.idxmax(axis=axis, skipna=skipna, numeric_only=numeric_only) func.__name__ = "idxmax" result = self._python_apply_general( func, self._obj_with_exclusions, not_indexed_same=True ) return result def idxmin( self, axis: Axis | None = None, skipna: bool = True, numeric_only: bool = False, ) -> DataFrame: """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {{0 or 'index', 1 or 'columns'}}, default None The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. If axis is not provided, grouper's axis is used. .. versionchanged:: 2.0.0 skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionadded:: 1.5.0 Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin : Return index of the minimum element. Notes ----- This method is the DataFrame version of ``ndarray.argmin``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], ... 'co2_emissions': [37.2, 19.66, 1712]}, ... index=['Pork', 'Wheat Products', 'Beef']) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the minimum value in each column. >>> df.idxmin() consumption Pork co2_emissions Wheat Products dtype: object To return the index for the minimum value in each row, use ``axis="columns"``. >>> df.idxmin(axis="columns") Pork consumption Wheat Products co2_emissions Beef consumption dtype: object """ if axis is None: axis = self.axis def func(df): return df.idxmin(axis=axis, skipna=skipna, numeric_only=numeric_only) func.__name__ = "idxmin" result = self._python_apply_general( func, self._obj_with_exclusions, not_indexed_same=True ) return result boxplot = boxplot_frame_groupby def value_counts( self, subset: Sequence[Hashable] | None = None, normalize: bool = False, sort: bool = True, ascending: bool = False, dropna: bool = True, ) -> DataFrame | Series: """ Return a Series or DataFrame containing counts of unique rows. .. versionadded:: 1.4.0 Parameters ---------- subset : list-like, optional Columns to use when counting unique combinations. normalize : bool, default False Return proportions rather than frequencies. sort : bool, default True Sort by frequencies. ascending : bool, default False Sort in ascending order. dropna : bool, default True Don’t include counts of rows that contain NA values. Returns ------- Series or DataFrame Series if the groupby as_index is True, otherwise DataFrame. See Also -------- Series.value_counts: Equivalent method on Series. DataFrame.value_counts: Equivalent method on DataFrame. SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy. Notes ----- - If the groupby as_index is True then the returned Series will have a MultiIndex with one level per input column. - If the groupby as_index is False then the returned DataFrame will have an additional column with the value_counts. 
The column is labelled 'count' or 'proportion', depending on the ``normalize`` parameter. By default, rows that contain any NA values are omitted from the result. By default, the result will be in descending order so that the first element of each group is the most frequently-occurring row. Examples -------- >>> df = pd.DataFrame({ ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'], ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'], ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR'] ... }) >>> df gender education country 0 male low US 1 male medium FR 2 female high US 3 male low FR 4 female high FR 5 male low FR >>> df.groupby('gender').value_counts() gender education country female high FR 1 US 1 male low FR 2 US 1 medium FR 1 Name: count, dtype: int64 >>> df.groupby('gender').value_counts(ascending=True) gender education country female high FR 1 US 1 male low US 1 medium FR 1 low FR 2 Name: count, dtype: int64 >>> df.groupby('gender').value_counts(normalize=True) gender education country female high FR 0.50 US 0.50 male low FR 0.50 US 0.25 medium FR 0.25 Name: proportion, dtype: float64 >>> df.groupby('gender', as_index=False).value_counts() gender education country count 0 female high FR 1 1 female high US 1 2 male low FR 2 3 male low US 1 4 male medium FR 1 >>> df.groupby('gender', as_index=False).value_counts(normalize=True) gender education country proportion 0 female high FR 0.50 1 female high US 0.50 2 male low FR 0.50 3 male low US 0.25 4 male medium FR 0.25 """ return self._value_counts(subset, normalize, sort, ascending, dropna) def fillna( self, value: Hashable | Mapping | Series | DataFrame = None, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool = False, limit=None, downcast=None, ) -> DataFrame | None: """ Fill NA/NaN values using the specified method within groups. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. Users wanting to use the ``value`` argument and not ``method`` should prefer :meth:`.DataFrame.fillna` as this will produce the same result and be more performant. method : {{'bfill', 'ffill', None}}, default None Method to use for filling holes. ``'ffill'`` will propagate the last valid observation forward within a group. ``'bfill'`` will use next valid observation to fill the gap. axis : {0 or 'index', 1 or 'columns'} Axis along which to fill missing values. When the :class:`DataFrameGroupBy` ``axis`` argument is ``0``, using ``axis=1`` here will produce the same results as :meth:`.DataFrame.fillna`. When the :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0`` or ``axis=1`` here will produce the same results. inplace : bool, default False Broken. Do not set to True. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill within a group. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- DataFrame Object with missing values filled. See Also -------- ffill : Forward fill values within a group. bfill : Backward fill values within a group. Examples -------- >>> df = pd.DataFrame( ... { ... "key": [0, 0, 1, 1, 1], ... "A": [np.nan, 2, np.nan, 3, np.nan], ... "B": [2, 3, np.nan, np.nan, np.nan], ... "C": [np.nan, np.nan, 2, np.nan, np.nan], ... } ... ) >>> df key A B C 0 0 NaN 2.0 NaN 1 0 2.0 3.0 NaN 2 1 NaN NaN 2.0 3 1 3.0 NaN NaN 4 1 NaN NaN NaN Propagate non-null values forward or backward within each group along columns. >>> df.groupby("key").fillna(method="ffill") A B C 0 NaN 2.0 NaN 1 2.0 3.0 NaN 2 NaN NaN 2.0 3 3.0 NaN 2.0 4 3.0 NaN 2.0 >>> df.groupby("key").fillna(method="bfill") A B C 0 2.0 2.0 NaN 1 2.0 3.0 NaN 2 3.0 NaN 2.0 3 3.0 NaN NaN 4 NaN NaN NaN Propagate non-null values forward or backward within each group along rows. >>> df.groupby([0, 0, 1, 1], axis=1).fillna(method="ffill") key A B C 0 0.0 0.0 2.0 2.0 1 0.0 2.0 3.0 3.0 2 1.0 1.0 NaN 2.0 3 1.0 3.0 NaN NaN 4 1.0 1.0 NaN NaN >>> df.groupby([0, 0, 1, 1], axis=1).fillna(method="bfill") key A B C 0 0.0 NaN 2.0 NaN 1 0.0 2.0 3.0 NaN 2 1.0 NaN 2.0 2.0 3 1.0 3.0 NaN NaN 4 1.0 NaN NaN NaN Only replace the first NaN element within a group along rows. >>> df.groupby("key").fillna(method="ffill", limit=1) A B C 0 NaN 2.0 NaN 1 2.0 3.0 NaN 2 NaN NaN 2.0 3 3.0 NaN 2.0 4 3.0 NaN NaN """ result = self._op_via_apply( "fillna", value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, ) return result def take( self, indices: TakeIndexer, axis: Axis | None = 0, **kwargs, ) -> DataFrame: """ Return the elements in the given *positional* indices in each group. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. If a requested index does not exist for some group, this method will raise. To get similar behavior that ignores indices that don't exist, see :meth:`.DataFrameGroupBy.nth`. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- DataFrame An DataFrame containing the elements taken from each group. See Also -------- DataFrame.take : Take elements from a Series along an axis. DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan), ... ('rabbit', 'mammal', 15.0)], ... columns=['name', 'class', 'max_speed'], ... index=[4, 3, 2, 1, 0]) >>> df name class max_speed 4 falcon bird 389.0 3 parrot bird 24.0 2 lion mammal 80.5 1 monkey mammal NaN 0 rabbit mammal 15.0 >>> gb = df.groupby([1, 1, 2, 2, 2]) Take elements at positions 0 and 1 along the axis 0 (default). Note how the indices selected in the result do not correspond to our input indices 0 and 1. 
That's because we are selecting the 0th and 1st rows, not rows whose indices equal 0 and 1. >>> gb.take([0, 1]) name class max_speed 1 4 falcon bird 389.0 3 parrot bird 24.0 2 2 lion mammal 80.5 1 monkey mammal NaN The order of the specified indices influences the order in the result. Here, the order is swapped from the previous example. >>> gb.take([1, 0]) name class max_speed 1 3 parrot bird 24.0 4 falcon bird 389.0 2 1 monkey mammal NaN 2 lion mammal 80.5 Take elements at indices 1 and 2 along the axis 1 (column selection). We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> gb.take([-1, -2]) name class max_speed 1 3 parrot bird 24.0 4 falcon bird 389.0 2 0 rabbit mammal 15.0 1 monkey mammal NaN """ result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) return result def skew( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool = True, numeric_only: bool = False, **kwargs, ) -> DataFrame: """ Return unbiased skew within groups. Normalized by N-1. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default 0 Axis for the function to be applied on. Specifying ``axis=None`` will apply the aggregation across both axes. .. versionadded:: 2.0.0 skipna : bool, default True Exclude NA/null values when computing the result. numeric_only : bool, default False Include only float, int, boolean columns. **kwargs Additional keyword arguments to be passed to the function. Returns ------- DataFrame See Also -------- DataFrame.skew : Return unbiased skew over requested axis. Examples -------- >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi', ... 'lion', 'monkey', 'rabbit'], ... ['bird', 'bird', 'bird', 'bird', ... 'mammal', 'mammal', 'mammal']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class')) >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan, ... 80.5, 21.5, 15.0]}, ... 
index=index) >>> df max_speed name class falcon bird 389.0 parrot bird 24.0 cockatoo bird 70.0 kiwi bird NaN lion mammal 80.5 monkey mammal 21.5 rabbit mammal 15.0 >>> gb = df.groupby(["class"]) >>> gb.skew() max_speed class bird 1.628296 mammal 1.669046 >>> gb.skew(skipna=False) max_speed class bird NaN mammal 1.669046 """ result = self._op_via_apply( "skew", axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs, ) return result def plot(self) -> GroupByPlot: result = GroupByPlot(self) return result def corr( self, method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson", min_periods: int = 1, numeric_only: bool = False, ) -> DataFrame: result = self._op_via_apply( "corr", method=method, min_periods=min_periods, numeric_only=numeric_only ) return result def cov( self, min_periods: int | None = None, ddof: int | None = 1, numeric_only: bool = False, ) -> DataFrame: result = self._op_via_apply( "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only ) return result def hist( self, column: IndexLabel = None, by=None, grid: bool = True, xlabelsize: int | None = None, xrot: float | None = None, ylabelsize: int | None = None, yrot: float | None = None, ax=None, sharex: bool = False, sharey: bool = False, figsize: tuple[int, int] | None = None, layout: tuple[int, int] | None = None, bins: int | Sequence[int] = 10, backend: str | None = None, legend: bool = False, **kwargs, ): result = self._op_via_apply( "hist", column=column, by=by, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout, bins=bins, backend=backend, legend=legend, **kwargs, ) return result def dtypes(self) -> Series: # error: Incompatible return value type (got "DataFrame", expected "Series") return self.apply(lambda df: df.dtypes) # type: ignore[return-value] def corrwith( self, other: DataFrame | Series, axis: Axis = 0, drop: bool = False, method: CorrelationMethod = "pearson", numeric_only: bool = False, ) -> DataFrame: result = self._op_via_apply( "corrwith", other=other, axis=axis, drop=drop, method=method, numeric_only=numeric_only, ) return result def get_groupby( obj: NDFrame, by: _KeysArgType | None = None, axis: AxisInt = 0, grouper: ops.BaseGrouper | None = None, group_keys: bool = True, ) -> GroupBy: klass: type[GroupBy] if isinstance(obj, Series): from pandas.core.groupby.generic import SeriesGroupBy klass = SeriesGroupBy elif isinstance(obj, DataFrame): from pandas.core.groupby.generic import DataFrameGroupBy klass = DataFrameGroupBy else: # pragma: no cover raise TypeError(f"invalid type: {obj}") return klass( obj=obj, keys=by, axis=axis, grouper=grouper, group_keys=group_keys, )
null
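The SeriesGroupBy.nunique and value_counts implementations in the prompt above lean on the same sorting trick: sort rows by (group ids, value codes), mark group boundaries and value changes, then sum the "new value" flags per group with np.add.reduceat. Below is a minimal standalone sketch of that idea; the ids and codes arrays are invented for illustration and stand in for pandas' factorized group labels and value codes.

import numpy as np

# Illustrative inputs: factorized group ids and value codes, one per row.
ids = np.array([0, 0, 0, 1, 1, 2])
codes = np.array([3, 3, 5, 7, 7, 7])

# Sort by (ids, codes) so equal values within a group become adjacent.
sorter = np.lexsort((codes, ids))
ids, codes = ids[sorter], codes[sorter]

# Group boundaries are where the group id changes.
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]

# A new unique observation is where the sorted code changes; the first
# row of each group always starts a new unique value.
inc = np.r_[1, codes[1:] != codes[:-1]]
inc[idx] = 1

# Sum the "new unique" flags per group: number of unique values per group.
out = np.add.reduceat(inc, idx).astype("int64")
print(out)  # [2 1 1]

The real implementation additionally handles NaN codes (-1), the dropna flag, and unobserved bins, which this sketch omits.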
173,106
from __future__ import annotations import datetime from functools import ( partial, wraps, ) import inspect from textwrap import dedent from typing import ( TYPE_CHECKING, Callable, Hashable, Iterable, Iterator, List, Literal, Mapping, Sequence, TypeVar, Union, cast, final, ) import warnings import numpy as np from pandas._config.config import option_context from pandas._libs import ( Timestamp, lib, ) from pandas._libs.algos import rank_1d import pandas._libs.groupby as libgroupby from pandas._libs.missing import NA from pandas._typing import ( AnyArrayLike, ArrayLike, Axis, AxisInt, DtypeObj, FillnaOptions, IndexLabel, NDFrameT, PositionalIndexer, RandomState, Scalar, T, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, DataError, ) from pandas.util._decorators import ( Appender, Substitution, cache_readonly, doc, ) from pandas.core.dtypes.cast import ensure_dtype_can_hold_na from pandas.core.dtypes.common import ( is_bool_dtype, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, needs_i8_conversion, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms, sample, ) from pandas.core._numba import executor from pandas.core.arrays import ( BaseMaskedArray, BooleanArray, Categorical, DatetimeArray, ExtensionArray, FloatingArray, TimedeltaArray, ) from pandas.core.base import ( PandasObject, SelectionMixin, ) import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame from pandas.core.groupby import ( base, numba_, ops, ) from pandas.core.groupby.grouper import get_grouper from pandas.core.groupby.indexing import ( GroupByIndexingMixin, GroupByNthSelector, ) from pandas.core.indexes.api import ( CategoricalIndex, Index, MultiIndex, RangeIndex, default_index, ) from pandas.core.internals.blocks import ensure_block_shape from pandas.core.series import Series from pandas.core.sorting import get_group_index_sorter from pandas.core.util.numba_ import ( get_jit_arguments, maybe_use_numba, ) def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... The provided code snippet includes necessary dependencies for implementing the `_insert_quantile_level` function. Write a Python function `def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiIndex` to solve the following problem: Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex. The quantile level in the MultiIndex is a repeated copy of 'qs'. Parameters ---------- idx : Index qs : np.ndarray[float64] Returns ------- MultiIndex Here is the function: def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiIndex: """ Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex. The quantile level in the MultiIndex is a repeated copy of 'qs'. Parameters ---------- idx : Index qs : np.ndarray[float64] Returns ------- MultiIndex """ nqs = len(qs) if idx._is_multi: idx = cast(MultiIndex, idx) lev_codes, lev = Index(qs).factorize() levels = list(idx.levels) + [lev] codes = [np.repeat(x, nqs) for x in idx.codes] + [np.tile(lev_codes, len(idx))] mi = MultiIndex(levels=levels, codes=codes, names=idx.names + [None]) else: mi = MultiIndex.from_product([idx, qs]) return mi
Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex.

The quantile level in the MultiIndex is a repeated copy of 'qs'.

Parameters
----------
idx : Index
qs : np.ndarray[float64]

Returns
-------
MultiIndex
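As a quick illustration of what _insert_quantile_level produces (this is a private pandas helper; the call below assumes the function defined in the prompt above is in scope, and the index/quantile values are made up):

import numpy as np
from pandas import Index

idx = Index(["a", "b"], name="key")
qs = np.array([0.25, 0.5, 0.75])

mi = _insert_quantile_level(idx, qs)
print(mi)
# MultiIndex with tuples ('a', 0.25), ('a', 0.5), ('a', 0.75),
# ('b', 0.25), ('b', 0.5), ('b', 0.75) and names ['key', None]

With a plain Index the function falls through to MultiIndex.from_product; the factorize/np.repeat branch is only taken when the input is already a MultiIndex.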
173,107
from __future__ import annotations import functools import inspect from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import ( NumbaUtilError, jit_user_function, ) class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) The provided code snippet includes necessary dependencies for implementing the `validate_udf` function. Write a Python function `def validate_udf(func: Callable) -> None` to solve the following problem: Validate user defined function for ops when using Numba with groupby ops. The first signature arguments should include: def f(values, index, ...): ... Parameters ---------- func : function, default False user defined function Returns ------- None Raises ------ NumbaUtilError Here is the function: def validate_udf(func: Callable) -> None: """ Validate user defined function for ops when using Numba with groupby ops. The first signature arguments should include: def f(values, index, ...): ... Parameters ---------- func : function, default False user defined function Returns ------- None Raises ------ NumbaUtilError """ if not callable(func): raise NotImplementedError( "Numba engine can only be used with a single function." ) udf_signature = list(inspect.signature(func).parameters.keys()) expected_args = ["values", "index"] min_number_args = len(expected_args) if ( len(udf_signature) < min_number_args or udf_signature[:min_number_args] != expected_args ): raise NumbaUtilError( f"The first {min_number_args} arguments to {func.__name__} must be " f"{expected_args}" )
Validate user defined function for ops when using Numba with groupby ops.

The first signature arguments should include:

def f(values, index, ...):
    ...

Parameters
----------
func : function, default False
    user defined function

Returns
-------
None

Raises
------
NumbaUtilError
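A small usage sketch for validate_udf, assuming the function from the snippet above is in scope; the UDF names and the extra "scale" parameter are invented for illustration:

def good_udf(values, index, scale):
    return values.sum() * scale

def bad_udf(data):
    return data.sum()

validate_udf(good_udf)     # passes: signature starts with (values, index, ...)

try:
    validate_udf(bad_udf)  # signature does not start with (values, index)
except Exception as err:   # NumbaUtilError in pandas
    print(err)             # "The first 2 arguments to bad_udf must be ['values', 'index']"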
173,108
from __future__ import annotations import functools import inspect from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import ( NumbaUtilError, jit_user_function, ) Any = object() TYPE_CHECKING = True class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime] def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module def jit_user_function( func: Callable, nopython: bool, nogil: bool, parallel: bool ) -> Callable: """ JIT the user's function given the configurable arguments. 
Parameters ---------- func : function user defined function nopython : bool nopython parameter for numba.JIT nogil : bool nogil parameter for numba.JIT parallel : bool parallel parameter for numba.JIT Returns ------- function Numba JITed function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") if numba.extending.is_jitted(func): # Don't jit a user passed jitted function numba_func = func else: def numba_func(data, *_args): if getattr(np, func.__name__, False) is func or isinstance( func, types.BuiltinFunctionType ): jf = func else: jf = numba.jit(func, nopython=nopython, nogil=nogil) def impl(data, *_args): return jf(data, *_args) return impl return numba_func The provided code snippet includes necessary dependencies for implementing the `generate_numba_agg_func` function. Write a Python function `def generate_numba_agg_func( func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool, ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]` to solve the following problem: Generate a numba jitted agg function specified by values from engine_kwargs. 1. jit the user's function 2. Return a groupby agg function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the groupby evaluation loop. Parameters ---------- func : function function to be applied to each group and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function Here is the function: def generate_numba_agg_func( func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool, ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: """ Generate a numba jitted agg function specified by values from engine_kwargs. 1. jit the user's function 2. Return a groupby agg function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the groupby evaluation loop. Parameters ---------- func : function function to be applied to each group and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function """ numba_func = jit_user_function(func, nopython, nogil, parallel) if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def group_agg( values: np.ndarray, index: np.ndarray, begin: np.ndarray, end: np.ndarray, num_columns: int, *args: Any, ) -> np.ndarray: assert len(begin) == len(end) num_groups = len(begin) result = np.empty((num_groups, num_columns)) for i in numba.prange(num_groups): group_index = index[begin[i] : end[i]] for j in numba.prange(num_columns): group = values[begin[i] : end[i], j] result[i, j] = numba_func(group, group_index, *args) return result return group_agg
Generate a numba jitted agg function specified by values from engine_kwargs.

1. jit the user's function
2. Return a groupby agg function with the jitted function inline

Configurations specified in engine_kwargs apply to both the user's
function _AND_ the groupby evaluation loop.

Parameters
----------
func : function
    function to be applied to each group and will be JITed
nopython : bool
    nopython to be passed into numba.jit
nogil : bool
    nogil to be passed into numba.jit
parallel : bool
    parallel to be passed into numba.jit

Returns
-------
Numba function
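The generated group_agg kernel expects contiguous group boundaries as begin/end offsets into a 2D values array, per the signature above. A minimal sketch of exercising it directly follows, assuming numba is installed; pandas itself calls this kernel from its internal groupby machinery, and my_mean, begin, and end are illustrative names rather than anything defined in the source above.

import numpy as np

def my_mean(group_values, group_index):
    # user function: gets one column's values for a single group plus that
    # group's index labels, and returns a scalar
    return group_values.mean()

# JIT the user function and build the group-wise aggregation kernel
agg = generate_numba_agg_func(my_mean, nopython=True, nogil=False, parallel=False)

values = np.array([[1.0, 10.0],
                   [2.0, 20.0],
                   [3.0, 30.0],
                   [4.0, 40.0]])
index = np.arange(len(values))
begin = np.array([0, 2])  # groups are contiguous row ranges:
end = np.array([2, 4])    # rows 0-1 form group 0, rows 2-3 form group 1

result = agg(values, index, begin, end, values.shape[1])
# result has shape (num_groups, num_columns) == (2, 2);
# expected values: [[1.5, 15.0], [3.5, 35.0]]

The aggregation kernel collapses each group to a single output row; the transform variant in the next entry keeps the input length instead.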
173,109
from __future__ import annotations import functools import inspect from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import ( NumbaUtilError, jit_user_function, ) Any = object() TYPE_CHECKING = True class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module def jit_user_function( func: Callable, nopython: bool, nogil: bool, parallel: bool ) -> Callable: """ JIT the user's function given the configurable arguments. 
Parameters ---------- func : function user defined function nopython : bool nopython parameter for numba.JIT nogil : bool nogil parameter for numba.JIT parallel : bool parallel parameter for numba.JIT Returns ------- function Numba JITed function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") if numba.extending.is_jitted(func): # Don't jit a user passed jitted function numba_func = func else: def numba_func(data, *_args): if getattr(np, func.__name__, False) is func or isinstance( func, types.BuiltinFunctionType ): jf = func else: jf = numba.jit(func, nopython=nopython, nogil=nogil) def impl(data, *_args): return jf(data, *_args) return impl return numba_func The provided code snippet includes necessary dependencies for implementing the `generate_numba_transform_func` function. Write a Python function `def generate_numba_transform_func( func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool, ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]` to solve the following problem: Generate a numba jitted transform function specified by values from engine_kwargs. 1. jit the user's function 2. Return a groupby transform function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the groupby evaluation loop. Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function Here is the function: def generate_numba_transform_func( func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool, ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: """ Generate a numba jitted transform function specified by values from engine_kwargs. 1. jit the user's function 2. Return a groupby transform function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the groupby evaluation loop. Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function """ numba_func = jit_user_function(func, nopython, nogil, parallel) if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def group_transform( values: np.ndarray, index: np.ndarray, begin: np.ndarray, end: np.ndarray, num_columns: int, *args: Any, ) -> np.ndarray: assert len(begin) == len(end) num_groups = len(begin) result = np.empty((len(values), num_columns)) for i in numba.prange(num_groups): group_index = index[begin[i] : end[i]] for j in numba.prange(num_columns): group = values[begin[i] : end[i], j] result[begin[i] : end[i], j] = numba_func(group, group_index, *args) return result return group_transform
Generate a numba jitted transform function specified by values from engine_kwargs.

1. jit the user's function
2. Return a groupby transform function with the jitted function inline

Configurations specified in engine_kwargs apply to both the user's
function _AND_ the groupby evaluation loop.

Parameters
----------
func : function
    function to be applied to each window and will be JITed
nopython : bool
    nopython to be passed into numba.jit
nogil : bool
    nogil to be passed into numba.jit
parallel : bool
    parallel to be passed into numba.jit

Returns
-------
Numba function
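The transform kernel differs from the agg kernel only in its output shape: the user function's result is written back over each group's rows, so the output keeps the input length. A matching sketch, again assuming numba is available and using the hypothetical demean as the user function:

import numpy as np

def demean(group_values, group_index):
    # user function: must return an array the same length as the group slice
    # (or a scalar that numpy can broadcast over it)
    return group_values - group_values.mean()

transform = generate_numba_transform_func(
    demean, nopython=True, nogil=False, parallel=False
)

values = np.array([[1.0, 10.0],
                   [2.0, 20.0],
                   [3.0, 30.0],
                   [4.0, 40.0]])
index = np.arange(len(values))
begin = np.array([0, 2])  # same contiguous grouping as in the agg sketch
end = np.array([2, 4])

out = transform(values, index, begin, end, values.shape[1])
# out has shape (len(values), num_columns) == (4, 2);
# expected values: [[-0.5, -5.0], [0.5, 5.0], [-0.5, -5.0], [0.5, 5.0]]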
173,110
from __future__ import annotations import collections import functools from typing import ( TYPE_CHECKING, Callable, Generic, Hashable, Iterator, Sequence, final, ) import numpy as np from pandas._libs import ( NaT, lib, ) import pandas._libs.groupby as libgroupby import pandas._libs.reduction as libreduction from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, NDFrameT, Shape, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import ( maybe_cast_pointwise_result, maybe_downcast_to_dtype, ) from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_platform_int, ensure_uint64, is_1d_only_ea_dtype, is_bool_dtype, is_complex_dtype, is_datetime64_any_dtype, is_float_dtype, is_integer_dtype, is_numeric_dtype, is_period_dtype, is_sparse, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.missing import ( isna, maybe_fill, ) from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.masked import ( BaseMaskedArray, BaseMaskedDtype, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.frame import DataFrame from pandas.core.groupby import grouper from pandas.core.indexes.api import ( CategoricalIndex, Index, MultiIndex, ensure_index, ) from pandas.core.series import Series from pandas.core.sorting import ( compress_group_index, decons_obs_group_ids, get_flattened_list, get_group_index, get_group_index_sorter, get_indexer_dict, ) AxisInt = int class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. 
read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = 
True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. 
pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. """ from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 
'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. 
GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. 
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file. 
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
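        # For example (illustrative): ``df[df["A"] > 0]`` or a plain
        # list/array of booleans selects *rows*; the mask generally has to
        # match ``len(self.index)`` and is dispatched to _getitem_bool_array
        # below.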
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
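
        Examples
        --------
        A minimal illustration, replacing the second column by position:

        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
        >>> df.isetitem(1, [10, 11])
        >>> df
           A   B
        0  1  10
        1  2  11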
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
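
        Notes
        -----
        If ``key`` already refers to several existing columns (duplicate
        column labels, or a label selecting multiple columns of a MultiIndex)
        and the sanitized value is one-dimensional, the value is broadcast
        across all of those columns.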
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 """ ) **_shared_doc_kwargs, extended_summary_sub=" column or", axis_description_sub=", and 1 identifies the columns", see_also_sub=" or columns", ) ) # ---------------------------------------------------------------------- # Reindex-based selection methods # ---------------------------------------------------------------------- # Sorting # error: Signature of "sort_values" incompatible with supertype "NDFrame" # TODO: Just move the sort_values doc here. ) # ---------------------------------------------------------------------- # Arithmetic Methods ) ) ) # ---------------------------------------------------------------------- # Function application ) # error: Signature of "any" incompatible with supertype "NDFrame" [override] # error: Missing return statement ) # ---------------------------------------------------------------------- # Merging / joining methods # ---------------------------------------------------------------------- # Statistical methods, etc. # ---------------------------------------------------------------------- # ndarray-like stats methods # ---------------------------------------------------------------------- # Add index and columns # ---------------------------------------------------------------------- # Add plotting methods to DataFrame # ---------------------------------------------------------------------- # Internal Interface Methods DataFrame class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. 
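    Constructing Series from a dictionary with an Index that shares no labels
    with the dictionary keys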
>>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
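            # Illustrative note (added; not in the original source): with empty
            # list-like data and no explicit dtype the Series therefore ends up
            # as object dtype, e.g. pd.Series([]).dtype -> dtype('O'), while
            # pd.Series([], dtype="float64") keeps float64.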
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
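
        Examples
        --------
        A short doctest-style illustration (added here; output assumes the
        default integer inference for the input list):

        >>> pd.Series([1, 2, 3]).ravel()
        array([1, 2, 3])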
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
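        # Illustrative note (added): with a string index ["a", "b", "c"],
        # ser[["a", "b"]] = 0 reaches this point and dispatches to
        # _set_labels, while ser[[0, 1]] = 0 on that same index is inferred
        # as an integer key and dispatches to the positional _set_values.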
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
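
        The cacher is recorded as an ``(item, weakref.ref(cacher))`` tuple so
        that ``_maybe_update_cacher`` can later notify the parent or invalidate
        its item cache; under Copy-on-Write nothing is recorded.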
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
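
        The repr is rendered through :meth:`Series.to_string`, using the
        display parameters gathered from the active options by
        ``fmt.get_series_repr_params``.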
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
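
        Examples
        --------
        A brief illustration (added; assumes the default ``dropna=True``):

        >>> pd.Series([2, 4, 2, 2, 4]).mode()
        0    2
        dtype: int64

        >>> pd.Series([2, 4, 2, 4]).mode()
        0    2
        1    4
        dtype: int64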
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
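        As a quick, hedged illustration of the boolean behaviour noted above
        (the exact result dtype for boolean input may differ from the float64
        case described here):

        >>> pd.Series([True, True, False]).diff()
        0      NaN
        1    False
        2     True
        dtype: object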
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
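        A minimal sketch of the delegation to :meth:`Series.dot` (values are
        illustrative only):

        >>> s = pd.Series([0, 1, 2, 3])
        >>> s @ pd.Series([-1, 2, -3, 4])
        8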
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
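        For illustration, a short sketch of the NA handling described above
        (the printed dtype assumes a 64-bit platform, where ``np.intp`` is
        int64):

        >>> pd.Series([3, np.nan, 1]).argsort()
        0    1
        1   -1
        2    0
        dtype: int64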
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
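    # A hedged usage sketch for the ``any`` reduction declared above; the
    # concrete implementation is attached elsewhere in pandas, and the data
    # below is illustrative only:
    #
    #   >>> pd.Series([False, True]).any()
    #   True
    #   >>> pd.Series([], dtype="float64").any()
    #   False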
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series def _is_indexed_like(obj, axes, axis: AxisInt) -> bool: if isinstance(obj, Series): if len(axes) > 1: return False return obj.axes[axis].equals(axes[axis]) elif isinstance(obj, DataFrame): return obj.axes[axis].equals(axes[axis]) return False
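# A short usage sketch for the ``align`` wrapper defined above (illustrative
# values; ``join="outer"`` is the default and produces the sorted union index):
#
#   >>> a = pd.Series([1, 2, 3], index=["a", "b", "c"])
#   >>> b = pd.Series([10, 20], index=["b", "d"])
#   >>> left, right = a.align(b, join="outer")
#   >>> left.index
#   Index(['a', 'b', 'c', 'd'], dtype='object')
#   >>> left
#   a    1.0
#   b    2.0
#   c    3.0
#   d    NaN
#   dtype: float64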
from __future__ import annotations import collections import functools from typing import ( TYPE_CHECKING, Callable, Generic, Hashable, Iterator, Sequence, final, ) import numpy as np from pandas._libs import ( NaT, lib, ) import pandas._libs.groupby as libgroupby import pandas._libs.reduction as libreduction from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, NDFrameT, Shape, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import ( maybe_cast_pointwise_result, maybe_downcast_to_dtype, ) from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_platform_int, ensure_uint64, is_1d_only_ea_dtype, is_bool_dtype, is_complex_dtype, is_datetime64_any_dtype, is_float_dtype, is_integer_dtype, is_numeric_dtype, is_period_dtype, is_sparse, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.missing import ( isna, maybe_fill, ) from pandas.core.arrays import ( Categorical, DatetimeArray, ExtensionArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.masked import ( BaseMaskedArray, BaseMaskedDtype, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.frame import DataFrame from pandas.core.groupby import grouper from pandas.core.indexes.api import ( CategoricalIndex, Index, MultiIndex, ensure_index, ) from pandas.core.series import Series from pandas.core.sorting import ( compress_group_index, decons_obs_group_ids, get_flattened_list, get_group_index, get_group_index_sorter, get_indexer_dict, ) class DataSplitter(Generic[NDFrameT]): def __init__( self, data: NDFrameT, labels: npt.NDArray[np.intp], ngroups: int, axis: AxisInt = 0, ) -> None: self.data = data self.labels = ensure_platform_int(labels) # _should_ already be np.intp self.ngroups = ngroups self.axis = axis assert isinstance(axis, int), axis def _slabels(self) -> npt.NDArray[np.intp]: # Sorted labels return self.labels.take(self._sort_idx) def _sort_idx(self) -> npt.NDArray[np.intp]: # Counting sort indexer return get_group_index_sorter(self.labels, self.ngroups) def __iter__(self) -> Iterator: sdata = self._sorted_data if self.ngroups == 0: # we are inside a generator, rather than raise StopIteration # we merely return signal the end return starts, ends = lib.generate_slices(self._slabels, self.ngroups) for start, end in zip(starts, ends): yield self._chop(sdata, slice(start, end)) def _sorted_data(self) -> NDFrameT: return self.data.take(self._sort_idx, axis=self.axis) def _chop(self, sdata, slice_obj: slice) -> NDFrame: raise AbstractMethodError(self) class SeriesSplitter(DataSplitter): def _chop(self, sdata: Series, slice_obj: slice) -> Series: # fastpath equivalent to `sdata.iloc[slice_obj]` mgr = sdata._mgr.get_slice(slice_obj) ser = sdata._constructor(mgr, name=sdata.name, fastpath=True) return ser.__finalize__(sdata, method="groupby") class FrameSplitter(DataSplitter): def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame: # Fastpath equivalent to: # if self.axis == 0: # return sdata.iloc[slice_obj] # else: # return sdata.iloc[:, slice_obj] mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis) df = sdata._constructor(mgr) return df.__finalize__(sdata, method="groupby") AxisInt = int class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. 
The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. 
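For contrast (an illustrative extra example), passing ``copy=True`` for a 1d
ndarray forces a copy, so mutating the Series leaves the original array
untouched:

>>> r = np.array([1, 2])
>>> ser = pd.Series(r, copy=True)
>>> ser.iloc[0] = 999
>>> r
array([1, 2])
>>> ser
0    999
1      2
dtype: int64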
""" _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." 
) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. 
if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). 
This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. """ arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. 
Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. 
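Examples
--------
An illustrative example (the default index is a RangeIndex):

>>> s = pd.Series([1, 2, 3])
>>> s.axes
[RangeIndex(start=0, stop=3, step=1)]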
""" return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. 
self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. 
test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
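For illustration (output assumes the default display options):

>>> pd.Series([1, 2, 3], name="x")
0    1
1    2
2    3
Name: x, dtype: int64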
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
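Examples
--------
Illustrative values; outputs assume the default ``dropna=True``:

>>> pd.Series([2, 2, 3]).mode()
0    2
dtype: int64

>>> pd.Series(['a', 'b', 'b', 'a']).mode()
0    a
1    b
dtype: object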
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
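        For example, a brief sketch of the boolean behaviour (the result holds
        ``object`` dtype so that the leading ``NaN`` can be represented):

        >>> pd.Series([True, False, True]).diff()
        0     NaN
        1     True
        2     True
        dtype: object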
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
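        A minimal sketch of the operator form; this simply delegates to
        :meth:`Series.dot`:

        >>> s = pd.Series([0, 1, 2, 3])
        >>> s @ pd.Series([-1, 2, -3, 4])
        8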
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
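    # Note on ``aggregate`` above (illustrative only): when ``func`` is None,
    # keyword arguments are treated as a "named aggregation" mapping, e.g.
    # ``s.agg(minimum="min", maximum="max")`` is folded into
    # ``func = {"minimum": "min", "maximum": "max"}`` before being applied.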
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
    @Appender(
        """
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64

        >>> s.set_axis(['a', 'b', 'c'], axis=0)
        a    1
        b    2
        c    3
        dtype: int64
        """
    )
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub="",
        axis_description_sub="",
        see_also_sub="",
    )
    @Appender(NDFrame.set_axis.__doc__)
    def set_axis(
        self,
        labels,
        *,
        axis: Axis = 0,
        copy: bool | None = None,
    ) -> Series:
        return super().set_axis(labels, axis=axis, copy=copy)


class NDFrame(PandasObject, indexing.IndexingMixin):
    """
    N-dimensional analogue of DataFrame.
Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ) -> None: # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. """ if self._attrs is None: self._attrs = {} return self._attrs def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. 
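
        For example, after ``df.flags.allows_duplicate_labels = False``,
        operations that would introduce duplicate axis labels (such as
        ``df.reindex([0, 0])``) raise ``pandas.errors.DuplicateLabelError``.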
Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. 
fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. 
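
        Examples
        --------
        A sketch of how this feeds :meth:`DataFrame.eval` (illustrative):

        >>> df = pd.DataFrame({"A A": [1, 2]})
        >>> df.eval("`A A` + 1")
        0    2
        1    3
        dtype: int64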
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit in case of having a single block, otherwise a # copy is made, we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. 
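
        Notes
        -----
        Only length-1 axes are collapsed; axes with more than one label are
        left untouched, so ``squeeze`` is a safe no-op on objects that cannot
        be squeezed.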
Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... 
def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... 
names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. 
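
        This is stricter than element-wise comparison: ``equals`` treats NaNs
        in the same location as equal and requires matching dtypes, whereas
        ``==`` considers NaN unequal to NaN and compares values regardless of
        dtype.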
Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a 
{type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. 
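
        For example, if ``df.index.name == "key"`` and no column is labeled
        ``"key"``, then ``df._is_level_reference("key")`` is True; if a
        ``"key"`` column also exists, the key is treated as a label reference
        instead and this returns False.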
Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. 
axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. 
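
        Examples
        --------
        Iterating a DataFrame yields its column labels:

        >>> df = pd.DataFrame({"A": [1], "B": [2]})
        >>> list(df)
        ['A', 'B']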
""" return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional 
recursion # e.g. say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache: dict[Hashable, Series] = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods klass="object", storage_options=_shared_docs["storage_options"], storage_options_versionadded="1.2.0", ) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True, index: bool_t = True, index_label: IndexLabel = None, startrow: int = 0, startcol: int = 0, engine: str | None = None, merge_cells: bool_t = True, inf_rep: str = "inf", freeze_panes: tuple[int, int] | None = None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. 
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string. 
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. 
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
None is returned if the callable passed into ``method`` does not return an integer number of rows. The number of returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> from sqlalchemy import text >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation. >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM integers")).fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. 
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
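# Illustrative sketch (not part of the library source): the ``to_latex``
# overload stubs above encode the return-type contract -- the method returns
# the rendered LaTeX as ``str`` when ``buf`` is None and ``None`` once a path
# or writable buffer is supplied. A hedged usage example, assuming a small
# DataFrame ``df`` with a couple of columns:
#
#   tex = df.to_latex()                    # buf=None -> LaTeX returned as a string
#   df.to_latex("table.tex", index=False)  # path given -> file written, returns None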
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
relabel_index=relabel_index_, format={"formatter": formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_, ) def _to_latex_via_styler( self, buf=None, *, hide: dict | list[dict] | None = None, relabel_index: dict | list[dict] | None = None, format: dict | list[dict] | None = None, format_index: dict | list[dict] | None = None, render_kwargs: dict | None = None, ): """ Render object to a LaTeX tabular, longtable, or nested table. Uses the ``Styler`` implementation with the following, ordered, method chaining: .. code-block:: python styler = Styler(DataFrame) styler.hide(**hide) styler.relabel_index(**relabel_index) styler.format(**format) styler.format_index(**format_index) styler.to_latex(buf=buf, **render_kwargs) Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. hide : dict, list of dict Keyword args to pass to the method call of ``Styler.hide``. If a list will call the method numerous times. relabel_index : dict, list of dict Keyword args to pass to the method of ``Styler.relabel_index``. If a list will call the method numerous times. format : dict, list of dict Keyword args to pass to the method call of ``Styler.format``. If a list will call the method numerous times. format_index : dict, list of dict Keyword args to pass to the method call of ``Styler.format_index``. If a list will call the method numerous times. render_kwargs : dict Keyword args to pass to the method call of ``Styler.to_latex``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. """ from pandas.io.formats.style import Styler self = cast("DataFrame", self) styler = Styler(self, uuid="") for kw_name in ["hide", "relabel_index", "format", "format_index"]: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) # bold_rows is not a direct kwarg of Styler.to_latex render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop("bold_rows"): styler.applymap_index(lambda v: "textbf:--rwrap;") return styler.to_latex(buf=buf, **render_kwargs) def to_csv( self, path_or_buf: None = ..., sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> str: ... def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> None: ... 
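# Illustrative sketch (not part of the library source): the ``to_csv``
# overload stubs above follow the same pattern as ``to_latex`` -- the CSV text
# is returned as a ``str`` when ``path_or_buf`` is None, and ``None`` is
# returned once a path or buffer is given. A hedged example, assuming a small
# DataFrame ``df``:
#
#   csv_text = df.to_csv()             # path_or_buf=None -> CSV returned as a string
#   df.to_csv("out.csv", index=False)  # path given -> file written, returns None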
storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | Callable | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, Callable, default None Format string for floating point numbers. If a Callable is given, it takes precedence over other numeric formatting parameters, like decimal. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str, default 'w' Python write mode. The available write modes are the same as :py:func:`open`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. 
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError("list keys are not supported in xs, pass a tuple instead") if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_mgr, name=self.index[loc] ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. 
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., ) -> None: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., ) -> NDFrameT: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., ) -> NDFrameT | None: ... def drop( self: NDFrameT, labels: IndexLabel = None, *, axis: Axis = 0, index: IndexLabel = None, columns: IndexLabel = None, level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {"index": index} if self.ndim == 2: axes["columns"] = columns else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: IgnoreRaise = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
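    # The sort_index stubs encode the ``inplace`` contract for type checkers:
    # ``inplace=True`` returns ``None``, anything else returns a new object,
    # mirroring the ``sort_values`` stubs above.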
def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT | None: ... def sort_index( self: NDFrameT, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool_t = True, ignore_index: bool_t = False, key: IndexKeyFunc = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer( target, level, ascending, kind, na_position, sort_remaining, key ) if indexer is None: if inplace: result = self else: result = self.copy(deep=None) if ignore_index: result.index = default_index(len(self)) if inplace: return None else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 new_data.set_axis(axis, default_index(len(indexer))) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") klass=_shared_doc_kwargs["klass"], optional_reindex="", ) def reindex( self: NDFrameT, labels=None, index=None, columns=None, axis: Axis | None = None, method: str | None = None, copy: bool_t | None = None, level: Level | None = None, fill_value: Scalar | None = np.nan, limit: int | None = None, tolerance=None, ) -> NDFrameT: """ Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- {optional_reindex} method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: Propagate last valid observation forward to next valid. * backfill / bfill: Use next valid observation to fill gap. * nearest: Use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- {klass} with changed index. 
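Raises
------
TypeError
    If ``labels``, ``index`` and ``columns`` are all specified, or if
    ``axis`` is given together with ``index`` or ``columns``.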
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
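            # Delegate the realignment to the block manager: ``reindex_indexer``
            # places the existing blocks according to ``indexer`` along ``baxis``
            # and fills positions with no source row using ``fill_value``.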
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
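        # Fallback priority: internal names and ``_metadata`` entries become
        # plain instance attributes; a name matching an existing label on the
        # info axis is treated as a column assignment; everything else is set
        # as a regular attribute.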
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f() -> None: self._mgr = self._mgr.consolidate() self._protect_consolidate(f) def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value) or value is lib.no_default: return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True def _get_numeric_data(self: NDFrameT) -> NDFrameT: return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def values(self): raise AbstractMethodError(self) def _values(self) -> ArrayLike: """internal implementation""" raise AbstractMethodError(self) def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Parameters ---------- copy : bool, default True Whether to make a copy for non-object or non-inferrable columns or Series. Returns ------- same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ new_mgr = self._mgr.convert(copy=copy) return self._constructor(new_mgr).__finalize__(self, method="infer_objects") def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, dtype_backend: DtypeBackend = "numpy_nullable", ) -> NDFrameT: """ Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. .. versionadded:: 1.2.0 dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable" Which dtype_backend to use, e.g. whether a DataFrame should use nullable dtypes for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- Series or DataFrame Copy of input object with new dtype. See Also -------- infer_objects : Infer dtypes of objects. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. 
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy(deep=None) from pandas import Series value = Series(value) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) if using_copy_on_write(): result = self.copy(deep=None) else: result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( downcast if not is_dict else downcast.get(k) # type: ignore[union-attr] ) res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) if not inplace: result[k] = res_k else: # We can write into our existing column(s) iff dtype # was preserved. if isinstance(res_k, ABCSeries): # i.e. 'k' only shows up once in self.columns if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: # Different dtype -> no way to do inplace. 
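                            # so reassign the whole column, replacing its
                            # values (and dtype) with the filled result.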
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
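Missing values are filled from the next valid observation, working
backwards along the selected axis.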
Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def backfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. .. deprecated:: 2.0 {klass}.backfill is deprecated. Use {klass}.bfill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.backfill/Series.backfill is deprecated. Use " "DataFrame.bfill/Series.bfill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: Literal[False] = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT: ... def replace( self, to_replace=..., value=..., *, inplace: Literal[True], limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> None: ... def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: bool_t = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT | None: ... _shared_docs["replace"], klass=_shared_doc_kwargs["klass"], inplace=_shared_doc_kwargs["inplace"], replace_iloc=_shared_doc_kwargs["replace_iloc"], ) def replace( self: NDFrameT, to_replace=None, value=lib.no_default, *, inplace: bool_t = False, limit: int | None = None, regex: bool_t = False, method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default, ) -> NDFrameT | None: if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is lib.no_default or method is not lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. We have the corner case where the user explicitly # passes value=None *and* a method, which we interpret as meaning # they want the (documented) default behavior. if method is lib.no_default: # TODO: get this to show up as the default in the docs? 
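# Illustrative sketch (hypothetical data, not from the original source): in this
# branch replacement is fill-based rather than value-based -- matched entries are
# overwritten by a neighbouring value chosen by `method`, e.g.
#   pd.Series([0, 1, 2, 1]).replace(1, method="pad")   # -> [0, 0, 2, 2]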
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) fillna_methods = ["ffill", "bfill", "pad", "backfill"] should_transpose = axis == 1 and method not in fillna_methods obj = self.T if should_transpose else self if obj.empty: return self.copy() if method not in fillna_methods: axis = self._info_axis_number if isinstance(obj.index, MultiIndex) and method != "linear": raise ValueError( "Only `method=linear` interpolation is supported on MultiIndexes." ) # Set `limit_direction` depending on `method` if limit_direction is None: limit_direction = ( "backward" if method in ("backfill", "bfill") else "forward" ) else: if method in ("pad", "ffill") and limit_direction != "forward": raise ValueError( f"`limit_direction` must be 'forward' for method `{method}`" ) if method in ("backfill", "bfill") and limit_direction != "backward": raise ValueError( f"`limit_direction` must be 'backward' for method `{method}`" ) if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype." ) # create/use the index if method == "linear": # prior default index = Index(np.arange(len(obj.index))) else: index = obj.index methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index.dtype) or is_datetime64_any_dtype(index.dtype) or is_timedelta64_dtype(index.dtype) ) if method not in methods and not is_numeric_or_datetime: raise ValueError( "Index column must be numeric or datetime type when " f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " "interpolating." ) if isna(index).any(): raise NotImplementedError( "Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating." ) new_data = obj._mgr.interpolate( method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs, ) result = self._constructor(new_data) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. 
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self: NDFrameT, cond, other=np.nan, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). If not specified, entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. For `Series` this parameter is unused and defaults to 0. level : int, default None Alignment level if needed. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. If the axis of ``other`` does not align with axis of ``cond`` {klass}, the misaligned index positions will be filled with {cond_rev}. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. The dtype of the object takes precedence. The fill value is casted to the object's dtype, if this can be done losslessly. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s = pd.Series(range(5)) >>> t = pd.Series([True, False]) >>> s.where(t, 99) 0 0 1 99 2 99 3 99 4 99 dtype: int64 >>> s.mask(t, 99) 0 99 1 1 2 99 3 99 4 99 dtype: int64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = common.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level) def mask( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def mask( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... 
def mask( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self: NDFrameT, cond, other=lib.no_default, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") cond = common.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, ) def shift( self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None, ) -> NDFrameT: """ Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ... 
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_convert(None) 2018-09-14 23:30:00 1 dtype: int64 """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") def tz_localize( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo or None Time zone to localize. Passing ``None`` will remove the time zone information and preserve local time. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- {klass} Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']), ... ) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Pass None to convert to tz-naive index and preserve local time: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_localize(None) 2018-09-15 01:30:00 1 dtype: int64 Be careful with DST changes. 
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. 
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> DataFrame | Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, **kwargs ) def _accum_func( self, name: str, func, axis: 
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") def _get_splitter( data: NDFrame, labels: np.ndarray, ngroups: int, axis: AxisInt = 0 ) -> DataSplitter: if isinstance(data, Series): klass: type[DataSplitter] = SeriesSplitter else: # i.e. DataFrame klass = FrameSplitter return klass(data, labels, ngroups, axis)
null
173,112
from __future__ import annotations from typing import ( TYPE_CHECKING, Hashable, Iterator, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._typing import ( ArrayLike, Axis, NDFrameT, npt, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, is_list_like, is_scalar, ) from pandas.core import algorithms from pandas.core.arrays import ( Categorical, ExtensionArray, ) import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.groupby import ops from pandas.core.groupby.categorical import recode_for_groupby from pandas.core.indexes.api import ( CategoricalIndex, Index, MultiIndex, ) from pandas.core.series import Series from pandas.io.formats.printing import pprint_thing class Grouper: """ A Grouper allows the user to specify a groupby instruction for an object. This specification will select a column via the key parameter, or if the level and/or axis parameters are given, a level of the index of the target object. If `axis` and/or `level` are passed as keywords to both `Grouper` and `groupby`, the values passed to `Grouper` take precedence. Parameters ---------- key : str, defaults to None Groupby key, which selects the grouping column of the target. level : name/number, defaults to None The level for the target index. freq : str / frequency object, defaults to None This will groupby the specified frequency if the target selection (via key or level) is a datetime-like object. For full specification of available frequencies, please see `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_. axis : str, int, defaults to 0 Number/name of the axis. sort : bool, default to False Whether to sort the resulting labels. closed : {'left' or 'right'} Closed end of interval. Only when `freq` parameter is passed. label : {'left' or 'right'} Interval boundary to use for labeling. Only when `freq` parameter is passed. convention : {'start', 'end', 'e', 's'} If grouper is PeriodIndex and `freq` parameter is passed. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 dropna : bool, default True If True, and if group keys contain NA values, NA values together with row/column will be dropped. If False, NA values will also be treated as the key in groups. .. versionadded:: 1.2.0 Returns ------- A specification for a groupby instruction Examples -------- Syntactic sugar for ``df.groupby('A')`` >>> df = pd.DataFrame( ... { ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"], ... "Speed": [100, 5, 200, 300, 15], ... } ... 
) >>> df Animal Speed 0 Falcon 100 1 Parrot 5 2 Falcon 200 3 Falcon 300 4 Parrot 15 >>> df.groupby(pd.Grouper(key="Animal")).mean() Speed Animal Falcon 200.0 Parrot 10.0 Specify a resample operation on the column 'Publish date' >>> df = pd.DataFrame( ... { ... "Publish date": [ ... pd.Timestamp("2000-01-02"), ... pd.Timestamp("2000-01-02"), ... pd.Timestamp("2000-01-09"), ... pd.Timestamp("2000-01-16") ... ], ... "ID": [0, 1, 2, 3], ... "Price": [10, 20, 30, 40] ... } ... ) >>> df Publish date ID Price 0 2000-01-02 0 10 1 2000-01-02 1 20 2 2000-01-09 2 30 3 2000-01-16 3 40 >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean() ID Price Publish date 2000-01-02 0.5 15.0 2000-01-09 2.0 30.0 2000-01-16 3.0 40.0 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.groupby(pd.Grouper(freq='17min')).sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 To replace the use of the deprecated `base` argument, you can now use `offset`, in this example it is equivalent to have `base=2`: >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum() 2000-10-01 23:16:00 0 2000-10-01 23:33:00 9 2000-10-01 23:50:00 36 2000-10-02 00:07:00 39 2000-10-02 00:24:00 24 Freq: 17T, dtype: int64 """ sort: bool dropna: bool _gpr_index: Index | None _grouper: Index | None _attributes: tuple[str, ...] 
= ("key", "level", "freq", "axis", "sort", "dropna") def __new__(cls, *args, **kwargs): if kwargs.get("freq") is not None: from pandas.core.resample import TimeGrouper cls = TimeGrouper return super().__new__(cls) def __init__( self, key=None, level=None, freq=None, axis: Axis = 0, sort: bool = False, dropna: bool = True, ) -> None: self.key = key self.level = level self.freq = freq self.axis = axis self.sort = sort self.dropna = dropna self._grouper_deprecated = None self._indexer_deprecated = None self._obj_deprecated = None self._gpr_index = None self.binner = None self._grouper = None self._indexer = None def _get_grouper( self, obj: NDFrameT, validate: bool = True ) -> tuple[ops.BaseGrouper, NDFrameT]: """ Parameters ---------- obj : Series or DataFrame validate : bool, default True if True, validate the grouper Returns ------- a tuple of grouper, obj (possibly sorted) """ obj, _, _ = self._set_grouper(obj) grouper, _, obj = get_grouper( obj, [self.key], axis=self.axis, level=self.level, sort=self.sort, validate=validate, dropna=self.dropna, ) # Without setting this, subsequent lookups to .groups raise # error: Incompatible types in assignment (expression has type "BaseGrouper", # variable has type "None") self._grouper_deprecated = grouper # type: ignore[assignment] return grouper, obj def _set_grouper( self, obj: NDFrame, sort: bool = False, *, gpr_index: Index | None = None ): """ given an object and the specifications, setup the internal grouper for this particular specification Parameters ---------- obj : Series or DataFrame sort : bool, default False whether the resulting grouper should be sorted gpr_index : Index or None, default None Returns ------- NDFrame Index np.ndarray[np.intp] | None """ assert obj is not None indexer = None if self.key is not None and self.level is not None: raise ValueError("The Grouper cannot specify both a key and a level!") # Keep self._grouper value before overriding if self._grouper is None: # TODO: What are we assuming about subsequent calls? self._grouper = gpr_index self._indexer = self._indexer_deprecated # the key must be a valid info item if self.key is not None: key = self.key # The 'on' is already defined if getattr(gpr_index, "name", None) == key and isinstance(obj, Series): # Sometimes self._grouper will have been resorted while # obj has not. In this case there is a mismatch when we # call self._grouper.take(obj.index) so we need to undo the sorting # before we call _grouper.take. assert self._grouper is not None if self._indexer is not None: reverse_indexer = self._indexer.argsort() unsorted_ax = self._grouper.take(reverse_indexer) ax = unsorted_ax.take(obj.index) else: ax = self._grouper.take(obj.index) else: if key not in obj._info_axis: raise KeyError(f"The grouper name {key} is not found") ax = Index(obj[key], name=key) else: ax = obj._get_axis(self.axis) if self.level is not None: level = self.level # if a level is given it must be a mi level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) ax = Index(ax._get_level_values(level), name=ax.names[level]) else: if level not in (0, ax.name): raise ValueError(f"The level {level} is not valid") # possibly sort if (self.sort or sort) and not ax.is_monotonic_increasing: # use stable sort to support first, last, nth # TODO: why does putting na_position="first" fix datetimelike cases? 
indexer = self._indexer_deprecated = ax.array.argsort( kind="mergesort", na_position="first" ) ax = ax.take(indexer) obj = obj.take(indexer, axis=self.axis) # error: Incompatible types in assignment (expression has type # "NDFrameT", variable has type "None") self._obj_deprecated = obj # type: ignore[assignment] self._gpr_index = ax return obj, ax, indexer def ax(self) -> Index: warnings.warn( f"{type(self).__name__}.ax is deprecated and will be removed in a " "future version. Use Resampler.ax instead", FutureWarning, stacklevel=find_stack_level(), ) index = self._gpr_index if index is None: raise ValueError("_set_grouper must be called before ax is accessed") return index def indexer(self): warnings.warn( f"{type(self).__name__}.indexer is deprecated and will be removed " "in a future version. Use Resampler.indexer instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._indexer_deprecated def obj(self): warnings.warn( f"{type(self).__name__}.obj is deprecated and will be removed " "in a future version. Use GroupBy.indexer instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._obj_deprecated def grouper(self): warnings.warn( f"{type(self).__name__}.grouper is deprecated and will be removed " "in a future version. Use GroupBy.grouper instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._grouper_deprecated def groups(self): warnings.warn( f"{type(self).__name__}.groups is deprecated and will be removed " "in a future version. Use GroupBy.groups instead.", FutureWarning, stacklevel=find_stack_level(), ) # error: "None" has no attribute "groups" return self._grouper_deprecated.groups # type: ignore[attr-defined] def __repr__(self) -> str: attrs_list = ( f"{attr_name}={repr(getattr(self, attr_name))}" for attr_name in self._attributes if getattr(self, attr_name) is not None ) attrs = ", ".join(attrs_list) cls_name = type(self).__name__ return f"{cls_name}({attrs})" class Grouping: """ Holds the grouping information for a single key Parameters ---------- index : Index grouper : obj : DataFrame or Series name : Label level : observed : bool, default False If we are a Categorical, use the observed values in_axis : if the Grouping is a column in self.obj and hence among Groupby.exclusions list dropna : bool, default True Whether to drop NA groups. uniques : Array-like, optional When specified, will be used for unique values. Enables including empty groups in the result for a BinGrouper. Must not contain duplicates. 
Attributes ------- indices : dict Mapping of {group -> index_list} codes : ndarray Group codes group_index : Index or None unique groups groups : dict Mapping of {group -> label_list} """ _codes: npt.NDArray[np.signedinteger] | None = None _group_index: Index | None = None _all_grouper: Categorical | None _orig_cats: Index | None _index: Index def __init__( self, index: Index, grouper=None, obj: NDFrame | None = None, level=None, sort: bool = True, observed: bool = False, in_axis: bool = False, dropna: bool = True, uniques: ArrayLike | None = None, ) -> None: self.level = level self._orig_grouper = grouper grouping_vector = _convert_grouper(index, grouper) self._all_grouper = None self._orig_cats = None self._index = index self._sort = sort self.obj = obj self._observed = observed self.in_axis = in_axis self._dropna = dropna self._uniques = uniques # we have a single grouper which may be a myriad of things, # some of which are dependent on the passing in level ilevel = self._ilevel if ilevel is not None: # In extant tests, the new self.grouping_vector matches # `index.get_level_values(ilevel)` whenever # mapper is None and isinstance(index, MultiIndex) if isinstance(index, MultiIndex): index_level = index.get_level_values(ilevel) else: index_level = index if grouping_vector is None: grouping_vector = index_level else: mapper = grouping_vector grouping_vector = index_level.map(mapper) # a passed Grouper like, directly get the grouper in the same way # as single grouper groupby, use the group_info to get codes elif isinstance(grouping_vector, Grouper): # get the new grouper; we already have disambiguated # what key/level refer to exactly, don't need to # check again as we have by this point converted these # to an actual value (rather than a pd.Grouper) assert self.obj is not None # for mypy newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False) self.obj = newobj if isinstance(newgrouper, ops.BinGrouper): # TODO: can we unwrap this and get a tighter typing # for self.grouping_vector? grouping_vector = newgrouper else: # ops.BaseGrouper # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1. # If that were to occur, would we be throwing out information? # error: Cannot determine type of "grouping_vector" [has-type] ng = newgrouper.groupings[0].grouping_vector # type: ignore[has-type] # use Index instead of ndarray so we can recover the name grouping_vector = Index(ng, name=newgrouper.result_index.name) elif not isinstance( grouping_vector, (Series, Index, ExtensionArray, np.ndarray) ): # no level passed if getattr(grouping_vector, "ndim", 1) != 1: t = str(type(grouping_vector)) raise ValueError(f"Grouper for '{t}' not 1-dimensional") grouping_vector = index.map(grouping_vector) if not ( hasattr(grouping_vector, "__len__") and len(grouping_vector) == len(index) ): grper = pprint_thing(grouping_vector) errmsg = ( "Grouper result violates len(labels) == " f"len(data)\nresult: {grper}" ) raise AssertionError(errmsg) if isinstance(grouping_vector, np.ndarray): if grouping_vector.dtype.kind in ["m", "M"]: # if we have a date/time-like grouper, make sure that we have # Timestamps like # TODO 2022-10-08 we only have one test that gets here and # values are already in nanoseconds in that case. 
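# --- illustrative aside (not part of Grouping.__init__): dtype.kind is "M"
# for datetime64 and "m" for timedelta64 ndarrays, which is what the check
# above keys on before normalising the grouping vector:
#   >>> import numpy as np
#   >>> np.dtype("datetime64[ns]").kind, np.dtype("timedelta64[ns]").kind
#   ('M', 'm')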
grouping_vector = Series(grouping_vector).to_numpy() elif is_categorical_dtype(grouping_vector): # a passed Categorical self._orig_cats = grouping_vector.categories grouping_vector, self._all_grouper = recode_for_groupby( grouping_vector, sort, observed ) self.grouping_vector = grouping_vector def __repr__(self) -> str: return f"Grouping({self.name})" def __iter__(self) -> Iterator: return iter(self.indices) def _passed_categorical(self) -> bool: return is_categorical_dtype(self.grouping_vector) def name(self) -> Hashable: ilevel = self._ilevel if ilevel is not None: return self._index.names[ilevel] if isinstance(self._orig_grouper, (Index, Series)): return self._orig_grouper.name elif isinstance(self.grouping_vector, ops.BaseGrouper): return self.grouping_vector.result_index.name elif isinstance(self.grouping_vector, Index): return self.grouping_vector.name # otherwise we have ndarray or ExtensionArray -> no name return None def _ilevel(self) -> int | None: """ If necessary, converted index level name to index level position. """ level = self.level if level is None: return None if not isinstance(level, int): index = self._index if level not in index.names: raise AssertionError(f"Level {level} not in index") return index.names.index(level) return level def ngroups(self) -> int: return len(self.group_index) def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: # we have a list of groupers if isinstance(self.grouping_vector, ops.BaseGrouper): return self.grouping_vector.indices values = Categorical(self.grouping_vector) return values._reverse_indexer() def codes(self) -> npt.NDArray[np.signedinteger]: return self._codes_and_uniques[0] def group_arraylike(self) -> ArrayLike: """ Analogous to result_index, but holding an ArrayLike to ensure we can retain ExtensionDtypes. 
""" if self._all_grouper is not None: # retain dtype for categories, including unobserved ones return self.result_index._values elif self._passed_categorical: return self.group_index._values return self._codes_and_uniques[1] def result_index(self) -> Index: # result_index retains dtype for categories, including unobserved ones, # which group_index does not if self._all_grouper is not None: group_idx = self.group_index assert isinstance(group_idx, CategoricalIndex) cats = self._orig_cats # set_categories is dynamically added return group_idx.set_categories(cats) # type: ignore[attr-defined] return self.group_index def group_index(self) -> Index: codes, uniques = self._codes_and_uniques if not self._dropna and self._passed_categorical: assert isinstance(uniques, Categorical) if self._sort and (codes == len(uniques)).any(): # Add NA value on the end when sorting uniques = Categorical.from_codes( np.append(uniques.codes, [-1]), uniques.categories ) elif len(codes) > 0: # Need to determine proper placement of NA value when not sorting cat = self.grouping_vector na_idx = (cat.codes < 0).argmax() if cat.codes[na_idx] < 0: # count number of unique codes that comes before the nan value na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx]) uniques = Categorical.from_codes( np.insert(uniques.codes, na_unique_idx, -1), uniques.categories ) return Index._with_infer(uniques, name=self.name) def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]: uniques: ArrayLike if self._passed_categorical: # we make a CategoricalIndex out of the cat grouper # preserving the categories / ordered attributes; # doesn't (yet - GH#46909) handle dropna=False cat = self.grouping_vector categories = cat.categories if self._observed: ucodes = algorithms.unique1d(cat.codes) ucodes = ucodes[ucodes != -1] if self._sort: ucodes = np.sort(ucodes) else: ucodes = np.arange(len(categories)) uniques = Categorical.from_codes( codes=ucodes, categories=categories, ordered=cat.ordered ) codes = cat.codes if not self._dropna: na_mask = codes < 0 if np.any(na_mask): if self._sort: # Replace NA codes with `largest code + 1` na_code = len(categories) codes = np.where(na_mask, na_code, codes) else: # Insert NA code into the codes based on first appearance # A negative code must exist, no need to check codes[na_idx] < 0 na_idx = na_mask.argmax() # count number of unique codes that comes before the nan value na_code = algorithms.nunique_ints(codes[:na_idx]) codes = np.where(codes >= na_code, codes + 1, codes) codes = np.where(na_mask, na_code, codes) if not self._observed: uniques = uniques.reorder_categories(self._orig_cats) return codes, uniques elif isinstance(self.grouping_vector, ops.BaseGrouper): # we have a list of groupers codes = self.grouping_vector.codes_info uniques = self.grouping_vector.result_index._values elif self._uniques is not None: # GH#50486 Code grouping_vector using _uniques; allows # including uniques that are not present in grouping_vector. 
cat = Categorical(self.grouping_vector, categories=self._uniques) codes = cat.codes uniques = self._uniques else: # GH35667, replace dropna=False with use_na_sentinel=False # error: Incompatible types in assignment (expression has type "Union[ # ndarray[Any, Any], Index]", variable has type "Categorical") codes, uniques = algorithms.factorize( # type: ignore[assignment] self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna ) return codes, uniques def groups(self) -> dict[Hashable, np.ndarray]: return self._index.groupby(Categorical.from_codes(self.codes, self.group_index)) def _is_label_like(val) -> bool: return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val)) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... def using_copy_on_write(): _mode_options = _global_config["mode"] return _mode_options["copy_on_write"] and _mode_options["data_manager"] == "block" NDFrameT = TypeVar("NDFrameT", bound="NDFrame") Axis = Union[AxisInt, Literal["index", "columns", "rows"]] class InvalidIndexError(Exception): """ Exception raised when attempting to use an invalid index key. .. versionadded:: 1.1.0 """ class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. 
>>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype 
if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange 
protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. """ from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. 
""" mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. 
val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. 
%(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. 
Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. 
Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. >>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. 
If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. 
Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. 
.. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. 
_console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... 
dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... {'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if 
any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. 
# # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. 
Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) @doc( Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex'") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None @overload def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... @overload def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
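Examples
--------
A minimal sketch of positional column assignment, assuming a small illustrative two-column frame:

>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.isetitem(1, [30, 40])
>>> df
   A   B
0  1  30
1  2  40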
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
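A sketch of the alignment behavior as reached through the public setter, assuming an illustrative frame and labels:

>>> df = pd.DataFrame({"A": [1, 2]}, index=["x", "y"])
>>> df["B"] = pd.Series([10, 20], index=["y", "x"])
>>> df
   A   B
x  1  20
y  2  10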
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
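
        Notes
        -----
        ``loc`` is purely positional: ``df.insert(0, column, value)``
        prepends the new column, while ``df.insert(len(df.columns), column,
        value)`` appends it.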
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
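
        DataFrame and dict-like values are reindexed to ``self.index``;
        other list-likes are length-checked and passed through
        ``sanitize_array`` with ``copy=True``, and scalars are broadcast to
        the length of the index by ``sanitize_array``.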
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
    >>> df.set_axis(['I', 'II'], axis='columns')
       I  II
    0  1   4
    1  2   5
    2  3   6
    """
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub=" column or",
        axis_description_sub=", and 1 identifies the columns",
        see_also_sub=" or columns",
    )
    @Appender(NDFrame.set_axis.__doc__)
    def set_axis(
        self,
        labels,
        *,
        axis: Axis = 0,
        copy: bool | None = None,
    ) -> DataFrame:
        return super().set_axis(labels, axis=axis, copy=copy)

    # ----------------------------------------------------------------------
    # Reindex-based selection methods

    # ----------------------------------------------------------------------
    # Sorting

    # error: Signature of "sort_values" incompatible with supertype "NDFrame"
    # TODO: Just move the sort_values doc here.

    # ----------------------------------------------------------------------
    # Arithmetic Methods

    # ----------------------------------------------------------------------
    # Function application

    # error: Signature of "any" incompatible with supertype "NDFrame" [override]
    # error: Missing return statement

    # ----------------------------------------------------------------------
    # Merging / joining methods

    # ----------------------------------------------------------------------
    # Statistical methods, etc.

    # ----------------------------------------------------------------------
    # ndarray-like stats methods

    # ----------------------------------------------------------------------
    # Add index and columns

    # ----------------------------------------------------------------------
    # Add plotting methods to DataFrame

    # ----------------------------------------------------------------------
    # Internal Interface Methods


class Series(base.IndexOpsMixin, NDFrame):  # type: ignore[misc]
    """
    One-dimensional ndarray with axis labels (including time series).

    Labels need not be unique but must be a hashable type. The object
    supports both integer- and label-based indexing and provides a host of
    methods for performing operations involving the index. Statistical
    methods from ndarray have been overridden to automatically exclude
    missing data (currently represented as NaN).

    Operations between Series (+, -, /, \\*, \\*\\*) align values based on their
    associated index values -- they need not be the same length. The result
    index will be the sorted union of the two indexes.

    Parameters
    ----------
    data : array-like, Iterable, dict, or scalar value
        Contains data stored in Series. If data is a dict, argument order is
        maintained.
    index : array-like or Index (1d)
        Values must be hashable and have the same length as `data`.
        Non-unique index values are allowed. Will default to
        RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like
        and index is None, then the keys in the data are used as the index.
        If the index is not None, the resulting Series is reindexed with the
        index values.
    dtype : str, numpy.dtype, or ExtensionDtype, optional
        Data type for the output Series. If not specified, this will be
        inferred from `data`.
        See the :ref:`user guide <basics.dtypes>` for more usages.
    name : Hashable, default None
        The name to give to the Series.
    copy : bool, default False
        Copy input data. Only affects Series or 1d ndarray input. See examples.

    Notes
    -----
    Please reference the :ref:`User Guide <basics.series>` for more information.

    Examples
    --------
    Constructing Series from a dictionary with an Index specified

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
    >>> ser
    a   1
    b   2
    c   3
    dtype: int64

    The keys of the dictionary match with the Index values, hence the Index
    values have no effect.
    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['x', 'y', 'z'])
    >>> ser
    x   NaN
    y   NaN
    z   NaN
    dtype: float64

    Note that the Index is first built with the keys from the dictionary.
    After this the Series is reindexed with the given Index values, hence we
    get all NaN as a result.

    Constructing Series from a list with `copy=False`.

    >>> r = [1, 2]
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    [1, 2]
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to the input data type, the Series has a `copy` of the original
    data even though `copy=False`, so the data is unchanged.

    Constructing Series from a 1d ndarray with `copy=False`.

    >>> r = np.array([1, 2])
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    array([999,   2])
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to the input data type, the Series has a `view` on the original
    data, so the data is changed as well.
    """

    _typ = "series"
    _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)

    _name: Hashable
    _metadata: list[str] = ["name"]
    _internal_names_set = {"index"} | NDFrame._internal_names_set
    _accessors = {"dt", "cat", "str", "sparse"}
    _hidden_attrs = (
        base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([])
    )

    # Override cache_readonly bc Series is mutable
    # error: Incompatible types in assignment (expression has type "property",
    # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]")
    hasnans = property(  # type: ignore[assignment]
        # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget"
        base.IndexOpsMixin.hasnans.fget,  # type: ignore[attr-defined]
        doc=base.IndexOpsMixin.hasnans.__doc__,
    )
    _mgr: SingleManager
    div: Callable[[Series, Any], Series]
    rdiv: Callable[[Series, Any], Series]

    # ----------------------------------------------------------------------
    # Constructors

    def __init__(
        self,
        data=None,
        index=None,
        dtype: Dtype | None = None,
        name=None,
        copy: bool | None = None,
        fastpath: bool = False,
    ) -> None:
        if (
            isinstance(data, (SingleBlockManager, SingleArrayManager))
            and index is None
            and dtype is None
            and (copy is False or copy is None)
        ):
            if using_copy_on_write():
                data = data.copy(deep=False)
            # GH#33357 called with just the SingleBlockManager
            NDFrame.__init__(self, data)
            if fastpath:
                # e.g.
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
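
        Notes
        -----
        The name must be hashable; assigning an unhashable value (for
        example a list) raises a ``TypeError`` from the setter's
        ``validate_all_hashable`` check.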
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
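
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.ravel()
        array([1, 2, 3])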
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
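
        Unlike ``.loc``, this does not fall back to positional lookup; the
        label must resolve through ``self.index.get_loc`` unless
        ``takeable=True``, in which case ``label`` is interpreted directly
        as a positional indexer.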
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
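        # Illustrative sketch (not executed): with a string index, a list
        # key is treated as labels,
        #
        #     ser = pd.Series([1, 2, 3], index=["a", "b", "c"])
        #     ser[["a", "b"]] = 0        # -> _set_labels
        #
        # whereas with a default RangeIndex the same call with an integer
        # list falls back to positional setting via _set_values.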
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
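
        Under Copy-on-Write this is a no-op, since parent-child caching is
        not tracked in that mode.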
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
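        Examples
        --------
        A short illustration with arbitrary data; note that ties are all
        returned, in sorted order:

        >>> s = pd.Series([2, 4, 2, 2, 4, None])
        >>> s.mode()
        0    2.0
        dtype: float64

        >>> s = pd.Series([2, 4, 8, 2, 4, None])
        >>> s.mode()
        0    2.0
        1    4.0
        dtype: float64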
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
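        The first ``periods`` positions (the last ones, for negative
        ``periods``) have nothing to compare against and are filled with
        ``NaN``; this is also why integer input yields a float result.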
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
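        This is equivalent to ``self.dot(other)``:

        >>> pd.Series([1, 2]) @ pd.Series([3, 4])
        11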
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
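        For example, with one missing value (illustrative data):

        >>> pd.Series([3, np.nan, 1]).argsort()
        0    1
        1   -1
        2    0
        dtype: int64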
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
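        DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
            ascending order.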
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
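        See Also
        --------
        Series.reorder_levels : Rearrange index levels using input order.
        DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a
            particular axis.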
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
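The aggregate implementation above switches to the keyword-argument form when ``func`` is None by collecting the kwargs into a dict. A minimal sketch of the different call forms, assuming current pandas behaviour (the labels ``low``/``high`` are illustrative and not taken from the source):

import pandas as pd

s = pd.Series([1, 2, 3, 4])

s.agg("min")               # single function -> scalar 1
s.agg(["min", "max"])      # list of functions -> Series indexed by function name

# Passing a dict labels each result by its key; per the func=None branch above,
# s.agg(low="min", high="max") would build this same dict from the kwargs.
s.agg({"low": "min", "high": "max"})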
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series The provided code snippet includes necessary dependencies for implementing the `get_grouper` function. Write a Python function `def get_grouper( obj: NDFrameT, key=None, axis: Axis = 0, level=None, sort: bool = True, observed: bool = False, validate: bool = True, dropna: bool = True, ) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]` to solve the following problem: Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. 
They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values. If validate, then check for key/level overlaps. Here is the function: def get_grouper( obj: NDFrameT, key=None, axis: Axis = 0, level=None, sort: bool = True, observed: bool = False, validate: bool = True, dropna: bool = True, ) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]: """ Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values. If validate, then check for key/level overlaps. """ group_axis = obj._get_axis(axis) # validate that the passed single level is compatible with the passed # axis of the object if level is not None: # TODO: These if-block and else-block are almost same. # MultiIndex instance check is removable, but it seems that there are # some processes only for non-MultiIndex in else-block, # eg. `obj.index.name != level`. We have to consider carefully whether # these are applicable for MultiIndex. Even if these are applicable, # we need to check if it makes no side effect to subsequent processes # on the outside of this condition. # (GH 17621) if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): # Get the level values from group_axis key = group_axis.get_level_values(level) level = None else: # allow level to be a length-one list-like object # (e.g., level=[0]) # GH 13901 if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError("No group keys passed!") else: raise ValueError("multiple levels only valid with MultiIndex") if isinstance(level, str): if obj._get_axis(axis).name != level: raise ValueError( f"level name {level} is not the name " f"of the {obj._get_axis_name(axis)}" ) elif level > 0 or level < -1: raise ValueError("level > 0 or level < -1 only valid with MultiIndex") # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. level = None key = group_axis # a passed-in Grouper, directly convert if isinstance(key, Grouper): grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, frozenset(), obj else: return grouper, frozenset({key.key}), obj # already have a BaseGrouper, just return it elif isinstance(key, ops.BaseGrouper): return key, frozenset(), obj if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) # what are we after, exactly? 
any_callable = any(callable(g) or isinstance(g, dict) for g in keys) any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys) any_arraylike = any( isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys ) # is this an index replacement? if ( not any_callable and not any_arraylike and not any_groupers and match_axis_length and level is None ): if isinstance(obj, DataFrame): all_in_columns_index = all( g in obj.columns or g in obj.index.names for g in keys ) else: assert isinstance(obj, Series) all_in_columns_index = all(g in obj.index.names for g in keys) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings: list[Grouping] = [] exclusions: set[Hashable] = set() # if the actual grouper should be obj[key] def is_in_axis(key) -> bool: if not _is_label_like(key): if obj.ndim == 1: return False # items -> .columns for DataFrame, .index for Series items = obj.axes[-1] try: items.get_loc(key) except (KeyError, TypeError, InvalidIndexError): # TypeError shows up here if we pass e.g. an Index return False return True # if the grouper is obj[name] def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False if using_copy_on_write(): # For the CoW case, we check the references to determine if the # series is part of the object try: obj_gpr_column = obj[gpr.name] except (KeyError, IndexError, InvalidIndexError): return False if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series): return gpr._mgr.references_same_values( # type: ignore[union-attr] obj_gpr_column._mgr, 0 # type: ignore[arg-type] ) return False try: return gpr is obj[gpr.name] except (KeyError, IndexError, InvalidIndexError): # IndexError reached in e.g. test_skip_group_keys when we pass # lambda here # InvalidIndexError raised on key-types inappropriate for index, # e.g. DatetimeIndex.get_loc(tuple()) return False for gpr, level in zip(keys, levels): if is_in_obj(gpr): # df.groupby(df['name']) in_axis = True exclusions.add(gpr.name) elif is_in_axis(gpr): # df.groupby('name') if obj.ndim != 1 and gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr, axis=axis) in_axis, name, gpr = True, gpr, obj[gpr] if gpr.ndim != 1: # non-unique columns; raise here to get the name in the # exception message raise ValueError(f"Grouper for '{name}' not 1-dimensional") exclusions.add(name) elif obj._is_level_reference(gpr, axis=axis): in_axis, level, gpr = False, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.add(gpr.key) in_axis = True else: in_axis = False # create the Grouping # allow us to passing the actual Grouping as the gpr ping = ( Grouping( group_axis, gpr, obj=obj, level=level, sort=sort, observed=observed, in_axis=in_axis, dropna=dropna, ) if not isinstance(gpr, Grouping) else gpr ) groupings.append(ping) if len(groupings) == 0 and len(obj): raise ValueError("No group keys passed!") if len(groupings) == 0: groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) # create the internals grouper grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna) return grouper, frozenset(exclusions), obj
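A small sketch of what get_grouper returns for a plain column key. It reaches into pandas internals (pandas.core.groupby.grouper), whose import path and signature can change between versions, so treat it as illustrative only:

import pandas as pd
from pandas.core.groupby.grouper import get_grouper  # internal API, may change

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})

grouper, exclusions, obj = get_grouper(df, key="key")
type(grouper).__name__   # 'BaseGrouper'
exclusions               # frozenset({'key'}): the grouping column is excluded from aggregation
obj is df                # True here; obj is only replaced when a Grouper reindexes it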
Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers. Groupers are ultimately index mappings. They can originate as index mappings, keys to columns, functions, or Groupers. Groupers enable local references to axis, level, and sort, while the passed-in axis, level, and sort are 'global'. This routine tries to figure out what the passed-in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed and we have a categorical grouper, only show the observed values. If validate, then check for key/level overlaps.
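For context, the kinds of groupers the docstring enumerates (index mappings, level references, functions, Grouper objects) are normally reached through the public groupby API rather than by calling get_grouper directly; a small illustrative example:

import pandas as pd

s = pd.Series(
    [1, 2, 3, 4],
    index=pd.MultiIndex.from_product([["x", "y"], [0, 1]], names=["outer", "inner"]),
)

s.groupby(level="outer").sum()               # group by an index level name
s.groupby(lambda label: label[1] % 2).sum()  # function applied to each index label (a tuple here)
s.groupby(pd.Grouper(level="inner")).sum()   # an explicit Grouper object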
173,113
from __future__ import annotations from typing import ( TYPE_CHECKING, Hashable, Iterator, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._typing import ( ArrayLike, Axis, NDFrameT, npt, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, is_list_like, is_scalar, ) from pandas.core import algorithms from pandas.core.arrays import ( Categorical, ExtensionArray, ) import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.groupby import ops from pandas.core.groupby.categorical import recode_for_groupby from pandas.core.indexes.api import ( CategoricalIndex, Index, MultiIndex, ) from pandas.core.series import Series from pandas.io.formats.printing import pprint_thing class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. 
>>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " 
"compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. 
if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). 
This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. """ arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. 
Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. 
""" return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. 
self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. 
test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
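# Illustrative sketch (not part of the pandas source): the drop/level/inplace
# combinations handled in the reset_index implementation above. Variable names
# are made up for the example.
import pandas as pd

idx = pd.MultiIndex.from_arrays(
    [["bar", "bar", "baz"], ["one", "two", "one"]], names=["a", "b"]
)
s = pd.Series([1, 2, 3], index=idx, name="foo")

# Dropping only level "a" keeps level "b" as the new single-level index.
print(s.reset_index(level="a", drop=True).index.tolist())  # ['one', 'two', 'one']

# drop=False would have to produce a DataFrame, so inplace=True is rejected.
try:
    s.reset_index(inplace=True)
except TypeError as err:
    print("TypeError:", err)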
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
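# Illustrative sketch (not part of the pandas source): count() is simply the
# number of non-NA values, and Series.groupby (defined above) insists on at
# least one of `by`/`level` and on as_index=True.
import numpy as np
import pandas as pd

s = pd.Series([0.0, 1.0, np.nan, 3.0])
print(len(s), s.count())           # 4 3

try:
    s.groupby()                    # neither `by` nor `level` supplied
except TypeError as err:
    print("TypeError:", err)

try:
    s.groupby(level=0, as_index=False)
except TypeError as err:
    print("TypeError:", err)       # as_index=False only valid with DataFrame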
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
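# Illustrative sketch (not part of the pandas source): corr() aligns the two
# Series on the intersection of their indexes before computing, and min_periods
# (described above) should yield NaN when too few shared observations remain.
import pandas as pd

s1 = pd.Series([1.0, 2.0, 3.0, 4.0], index=list("abcd"))
s2 = pd.Series([4.0, 3.0, 2.0], index=list("bcd"))

print(round(s1.corr(s2), 3))       # -1.0 -- only labels b, c, d are compared
print(s1.corr(s2, min_periods=5))  # nan  -- fewer than 5 shared observations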
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
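# Illustrative sketch (not part of the pandas source): the Notes above in
# practice -- diff() on boolean data uses xor rather than subtraction, and
# numeric results come back as float64 so the leading NaN can be represented.
import pandas as pd

b = pd.Series([True, False, True, True])
print(b.diff().tolist())           # [nan, True, True, False]

i = pd.Series([1, 4, 9], dtype="int64")
print(i.diff().dtype)              # float64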
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
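# Illustrative sketch (not part of the pandas source): dot(), defined above,
# aligns on the index union when `other` is a Series/DataFrame and raises when
# that union is larger than either index (i.e. the labels do not match exactly).
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3], index=list("abc"))
print(s.dot(pd.Series([1, 1, 1], index=list("abc"))))   # 6

try:
    s.dot(pd.Series([1, 1, 1], index=list("abd")))      # 'd' not in s
except ValueError as err:
    print("ValueError:", err)                           # matrices are not aligned

print(s.dot(np.array([[1, 0], [0, 1], [1, 1]])))        # [4 5]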
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
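# Illustrative sketch (not part of the pandas source): argsort() keeps the
# original index, puts -1 at NA positions, and fills the remaining positions
# with the argsort of the non-NA values, as described above.
import numpy as np
import pandas as pd

print(pd.Series([3.0, 1.0, 2.0]).argsort().tolist())         # [1, 2, 0]
print(pd.Series([3.0, np.nan, 1.0, 2.0]).argsort().tolist())  # [1, -1, 2, 0]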
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
        Notes
        -----
        Faster than ``.sort_values().head(n)`` for small `n` relative to
        the size of the ``Series`` object.

        Examples
        --------
        >>> countries_population = {"Italy": 59000000, "France": 65000000,
        ...                         "Brunei": 434000, "Malta": 434000,
        ...                         "Maldives": 434000, "Iceland": 337000,
        ...                         "Nauru": 11300, "Tuvalu": 11300,
        ...                         "Anguilla": 11300, "Montserrat": 5200}
        >>> s = pd.Series(countries_population)
        >>> s
        Italy         59000000
        France        65000000
        Brunei          434000
        Malta           434000
        Maldives        434000
        Iceland         337000
        Nauru            11300
        Tuvalu           11300
        Anguilla         11300
        Montserrat        5200
        dtype: int64

        The `n` smallest elements where ``n=5`` by default.

        >>> s.nsmallest()
        Montserrat    5200
        Nauru        11300
        Tuvalu       11300
        Anguilla     11300
        Iceland     337000
        dtype: int64

        The `n` smallest elements where ``n=3``. Default `keep` value is
        'first' so Nauru and Tuvalu will be kept.

        >>> s.nsmallest(3)
        Montserrat   5200
        Nauru       11300
        Tuvalu      11300
        dtype: int64

        The `n` smallest elements where ``n=3`` and keeping the last
        duplicates. Anguilla and Tuvalu will be kept since they are the last
        with value 11300 based on the index order.

        >>> s.nsmallest(3, keep='last')
        Montserrat   5200
        Anguilla    11300
        Tuvalu      11300
        dtype: int64

        The `n` smallest elements where ``n=3`` with all duplicates kept. Note
        that the returned Series has four elements due to the three duplicates.

        >>> s.nsmallest(3, keep='all')
        Montserrat   5200
        Nauru       11300
        Tuvalu      11300
        Anguilla    11300
        dtype: int64
        """
        return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest()

    @doc(
        klass=_shared_doc_kwargs["klass"],
        extra_params=dedent(
            """copy : bool, default True
            Whether to copy underlying data."""
        ),
        examples=dedent(
            """\
            Examples
            --------
            >>> s = pd.Series(
            ...     ["A", "B", "A", "C"],
            ...     index=[
            ...         ["Final exam", "Final exam", "Coursework", "Coursework"],
            ...         ["History", "Geography", "History", "Geography"],
            ...         ["January", "February", "March", "April"],
            ...     ],
            ... )
            >>> s
            Final exam  History     January      A
                        Geography   February     B
            Coursework  History     March        A
                        Geography   April        C
            dtype: object

            In the following example, we will swap the levels of the indices.
            Here, we will swap the levels column-wise, but levels can be swapped
            row-wise in a similar manner. Note that column-wise is the default
            behaviour. By not supplying any arguments for i and j, we swap the
            last and second to last indices.

            >>> s.swaplevel()
            Final exam  January     History      A
                        February    Geography    B
            Coursework  March       History      A
                        April       Geography    C
            dtype: object

            By supplying one argument, we can choose which index to swap the
            last index with. We can for example swap the first index with the
            last one as follows.

            >>> s.swaplevel(0)
            January     History     Final exam    A
            February    Geography   Final exam    B
            March       History     Coursework    A
            April       Geography   Coursework    C
            dtype: object

            We can also define explicitly which indices we want to swap by
            supplying values for both i and j. Here, we for example swap the
            first and second indices.

            >>> s.swaplevel(0, 1)
            History     Final exam  January      A
            Geography   Final exam  February     B
            History     Coursework  March        A
            Geography   Coursework  April        C
            dtype: object"""
        ),
    )
    def swaplevel(
        self, i: Level = -2, j: Level = -1, copy: bool | None = None
    ) -> Series:
        """
        Swap levels i and j in a :class:`MultiIndex`.

        Default is to swap the two innermost levels of the index.

        Parameters
        ----------
        i, j : int or str
            Levels of the indices to be swapped. Can pass level name as string.
        {extra_params}

        Returns
        -------
        {klass}
            {klass} with levels swapped in MultiIndex.
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
        Used for substituting each value in a Series with another value,
        that may be derived from a function, a ``dict`` or
        a :class:`Series`.

        Parameters
        ----------
        arg : function, collections.abc.Mapping subclass or Series
            Mapping correspondence.
        na_action : {None, 'ignore'}, default None
            If 'ignore', propagate NaN values, without passing them to the
            mapping correspondence.

        Returns
        -------
        Series
            Same index as caller.

        See Also
        --------
        Series.apply : For applying more complex functions on a Series.
        DataFrame.apply : Apply a function row-/column-wise.
        DataFrame.applymap : Apply a function elementwise on a whole DataFrame.

        Notes
        -----
        When ``arg`` is a dictionary, values in Series that are not in the
        dictionary (as keys) are converted to ``NaN``. However, if the
        dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
        provides a method for default values), then this default is used
        rather than ``NaN``.

        Examples
        --------
        >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
        >>> s
        0      cat
        1      dog
        2      NaN
        3   rabbit
        dtype: object

        ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
        in the ``dict`` are converted to ``NaN``, unless the dict has a default
        value (e.g. ``defaultdict``):

        >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
        0   kitten
        1    puppy
        2      NaN
        3      NaN
        dtype: object

        It also accepts a function:

        >>> s.map('I am a {}'.format)
        0       I am a cat
        1       I am a dog
        2       I am a nan
        3    I am a rabbit
        dtype: object

        To avoid applying the function to missing values (and keep them as
        ``NaN``) ``na_action='ignore'`` can be used:

        >>> s.map('I am a {}'.format, na_action='ignore')
        0     I am a cat
        1     I am a dog
        2            NaN
        3    I am a rabbit
        dtype: object
        """
        new_values = self._map_values(arg, na_action=na_action)
        return self._constructor(new_values, index=self.index, copy=False).__finalize__(
            self, method="map"
        )

    def _gotitem(self, key, ndim, subset=None) -> Series:
        """
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            Requested ndim of result.
        subset : object, default None
            Subset to act on.
        """
        return self

    _agg_see_also_doc = dedent(
        """
    See Also
    --------
    Series.apply : Invoke function on a Series.
    Series.transform : Transform function producing a Series with like indexes.
    """
    )

    _agg_examples_doc = dedent(
        """
    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.agg('min')
    1

    >>> s.agg(['min', 'max'])
    min   1
    max   4
    dtype: int64
    """
    )

    @doc(
        _shared_docs["aggregate"],
        klass=_shared_doc_kwargs["klass"],
        axis=_shared_doc_kwargs["axis"],
        see_also=_agg_see_also_doc,
        examples=_agg_examples_doc,
    )
    def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
        # Validate the axis parameter
        self._get_axis_number(axis)

        # if func is None, will switch to user-provided "named aggregation" kwargs
        if func is None:
            func = dict(kwargs.items())

        op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs)
        result = op.agg()
        return result

    agg = aggregate

    # error: Signature of "any" incompatible with supertype "NDFrame" [override]
    @overload  # type: ignore[override]
    def any(
        self,
        *,
        axis: Axis = ...,
        bool_only: bool | None = ...,
        skipna: bool = ...,
        level: None = ...,
        **kwargs,
    ) -> bool:
        ...

    @overload
    def any(
        self,
        *,
        axis: Axis = ...,
        bool_only: bool | None = ...,
        skipna: bool = ...,
        level: Level,
        **kwargs,
    ) -> Series | bool:
        ...

    # error: Missing return statement
    def any(  # type: ignore[empty-body]
        self,
        axis: Axis = 0,
        bool_only: bool | None = None,
        skipna: bool = True,
        level: Level | None = None,
        **kwargs,
    ) -> Series | bool:
        ...
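The ``@doc(...)`` stack on ``aggregate`` above (and on several methods below) fills a shared docstring template with per-class substitutions at import time. A minimal sketch of how this kind of templating decorator works; ``doc_template``, ``_shared_agg_doc`` and ``my_agg`` are made-up names for illustration, not pandas' actual ``doc`` implementation from ``pandas.util._decorators``:

from textwrap import dedent


def doc_template(template: str, **kwargs):
    # Fill {placeholders} in a shared template and attach the result as the
    # wrapped function's __doc__ (simplified stand-in, illustration only).
    def decorator(func):
        func.__doc__ = dedent(template).format(**kwargs)
        return func

    return decorator


_shared_agg_doc = """
Aggregate using one or more operations over the specified {axis}.

{see_also}
"""


@doc_template(_shared_agg_doc, axis="index", see_also="See Also: Series.apply")
def my_agg(self, func=None):
    ...


print(my_agg.__doc__)  # template rendered with axis="index" etc.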
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
            If True the value of copy is ignored.
        level : int or level name, default None
            In case of MultiIndex, only rename labels in the specified level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise `KeyError` when a `dict-like mapper` or
            `index` contains labels that are not present in the index being
            transformed. If 'ignore', existing keys will be renamed and extra
            keys will be ignored.

        Returns
        -------
        Series or None
            Series with index labels or name altered or None if ``inplace=True``.

        See Also
        --------
        DataFrame.rename : Corresponding DataFrame method.
        Series.rename_axis : Set the name of the axis.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64
        """
        if axis is not None:
            # Make sure we raise if an invalid 'axis' is passed.
            axis = self._get_axis_number(axis)

        if callable(index) or is_dict_like(index):
            # error: Argument 1 to "_rename" of "NDFrame" has incompatible
            # type "Union[Union[Mapping[Any, Hashable], Callable[[Any],
            # Hashable]], Hashable, None]"; expected "Union[Mapping[Any,
            # Hashable], Callable[[Any], Hashable], None]"
            return super()._rename(
                index,  # type: ignore[arg-type]
                copy=copy,
                inplace=inplace,
                level=level,
                errors=errors,
            )
        else:
            return self._set_name(index, inplace=inplace)

    @Appender(
        """
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64

        >>> s.set_axis(['a', 'b', 'c'], axis=0)
        a    1
        b    2
        c    3
        dtype: int64
        """
    )
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub="",
        axis_description_sub="",
        see_also_sub="",
    )
    @Appender(NDFrame.set_axis.__doc__)
    def set_axis(
        self,
        labels,
        *,
        axis: Axis = 0,
        copy: bool | None = None,
    ) -> Series:
        return super().set_axis(labels, axis=axis, copy=copy)

    # ----------------------------------------------------------------------
    # Convert to types that support pd.NA

    # ----------------------------------------------------------------------
    # Time series-oriented methods

    # ----------------------------------------------------------------------
    # Add index

    # ----------------------------------------------------------------------
    # Accessor Methods
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    # Add plotting methods to Series

    # ----------------------------------------------------------------------
    # Template-Based Arithmetic/Comparison Methods


def _convert_grouper(axis: Index, grouper):
    if isinstance(grouper, dict):
        return grouper.get
    elif isinstance(grouper, Series):
        if grouper.index.equals(axis):
            return grouper._values
        else:
            return grouper.reindex(axis)._values
    elif isinstance(grouper, MultiIndex):
        return grouper._values
    elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)):
        if len(grouper) != len(axis):
            raise ValueError("Grouper and axis must be same length")

        if isinstance(grouper, (list, tuple)):
            grouper = com.asarray_tuplesafe(grouper)
        return grouper
    else:
        return grouper
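A small, hedged demonstration of what ``_convert_grouper`` returns for the main grouper types. It assumes the function above is in scope; the labels and group keys are illustrative only:

import numpy as np
import pandas as pd

idx = pd.Index(["a", "b", "c"])

# A dict grouper is normalized to its .get method: label -> group key,
# with labels missing from the dict mapping to None.
by_dict = _convert_grouper(idx, {"a": 1, "b": 2})
assert by_dict("a") == 1 and by_dict("c") is None

# A Series grouper whose index matches the axis is reduced to its values.
ser = pd.Series([1, 2, 3], index=idx)
assert list(_convert_grouper(idx, ser)) == [1, 2, 3]

# A list grouper must match the axis length and is converted to an ndarray.
arr = _convert_grouper(idx, [1, 1, 2])
assert isinstance(arr, np.ndarray) and len(arr) == len(idx)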
null
173,114
from __future__ import annotations import abc from collections import defaultdict from contextlib import nullcontext from functools import partial import inspect from typing import ( TYPE_CHECKING, Any, Callable, ContextManager, DefaultDict, Dict, Hashable, Iterable, Iterator, List, Sequence, cast, ) import numpy as np from pandas._config import option_context from pandas._libs import lib from pandas._typing import ( AggFuncType, AggFuncTypeBase, AggFuncTypeDict, AggObjType, Axis, AxisInt, NDFrameT, npt, ) from pandas.errors import SpecificationError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import is_nested_object from pandas.core.dtypes.common import ( is_dict_like, is_extension_array_dtype, is_list_like, is_sequence, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCNDFrame, ABCSeries, ) from pandas.core.algorithms import safe_sort from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike class FrameApply(NDFrameApply): obj: DataFrame # --------------------------------------------------------------- # Abstract Methods def result_index(self) -> Index: pass def result_columns(self) -> Index: pass def series_generator(self) -> Iterator[Series]: pass def wrap_results_for_axis( self, results: ResType, res_index: Index ) -> DataFrame | Series: pass # --------------------------------------------------------------- def res_columns(self) -> Index: return self.result_columns def columns(self) -> Index: return self.obj.columns def values(self): return self.obj.values def dtypes(self) -> Series: return self.obj.dtypes def apply(self) -> DataFrame | Series: """compute the results""" # dispatch to agg if is_list_like(self.f): return self.apply_multiple() # all empty if len(self.columns) == 0 and len(self.index) == 0: return self.apply_empty_result() # string dispatch if isinstance(self.f, str): return self.apply_str() # ufunc elif isinstance(self.f, np.ufunc): with np.errstate(all="ignore"): results = self.obj._mgr.apply("apply", func=self.f) # _constructor will retain self.index and self.columns return self.obj._constructor(data=results) # broadcasting if self.result_type == "broadcast": return self.apply_broadcast(self.obj) # one axis empty elif not all(self.obj.shape): return self.apply_empty_result() # raw elif self.raw: return self.apply_raw() return self.apply_standard() def agg(self): obj = self.obj axis = self.axis # TODO: Avoid having to change state self.obj = self.obj if self.axis == 0 else self.obj.T self.axis = 0 result = None try: result = super().agg() finally: self.obj = obj self.axis = axis if axis == 1: result = result.T if result is not None else result if result is None: result = self.obj.apply(self.orig_f, axis, args=self.args, **self.kwargs) return result def apply_empty_result(self): """ we have an empty result; at least 1 axis is 0 we will try to apply the function to an empty series in order to see if this is a reduction function """ assert callable(self.f) # we are not asked to reduce or infer reduction # so just return a copy of the existing object if self.result_type not in ["reduce", None]: return self.obj.copy() # we may need to infer should_reduce = self.result_type == "reduce" from pandas import Series if not should_reduce: try: if self.axis == 0: r = self.f(Series([], dtype=np.float64)) else: r = self.f(Series(index=self.columns, dtype=np.float64)) except Exception: pass else: should_reduce = not isinstance(r, Series) if should_reduce: 
if len(self.agg_axis): r = self.f(Series([], dtype=np.float64)) else: r = np.nan return self.obj._constructor_sliced(r, index=self.agg_axis) else: return self.obj.copy() def apply_raw(self): """apply to the values as a numpy array""" def wrap_function(func): """ Wrap user supplied function to work around numpy issue. see https://github.com/numpy/numpy/issues/8352 """ def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, str): result = np.array(result, dtype=object) return result return wrapper result = np.apply_along_axis(wrap_function(self.f), self.axis, self.values) # TODO: mixed type case if result.ndim == 2: return self.obj._constructor(result, index=self.index, columns=self.columns) else: return self.obj._constructor_sliced(result, index=self.agg_axis) def apply_broadcast(self, target: DataFrame) -> DataFrame: assert callable(self.f) result_values = np.empty_like(target.values) # axis which we want to compare compliance result_compare = target.shape[0] for i, col in enumerate(target.columns): res = self.f(target[col]) ares = np.asarray(res).ndim # must be a scalar or 1d if ares > 1: raise ValueError("too many dims to broadcast") if ares == 1: # must match return dim if result_compare != len(res): raise ValueError("cannot broadcast result") result_values[:, i] = res # we *always* preserve the original index / columns result = self.obj._constructor( result_values, index=target.index, columns=target.columns ) return result def apply_standard(self): results, res_index = self.apply_series_generator() # wrap results return self.wrap_results(results, res_index) def apply_series_generator(self) -> tuple[ResType, Index]: assert callable(self.f) series_gen = self.series_generator res_index = self.result_index results = {} with option_context("mode.chained_assignment", None): for i, v in enumerate(series_gen): # ignore SettingWithCopy here in case the user mutates results[i] = self.f(v) if isinstance(results[i], ABCSeries): # If we have a view on v, we need to make a copy because # series_generator will swap out the underlying data results[i] = results[i].copy(deep=False) return results, res_index def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series: from pandas import Series # see if we can infer the results if len(results) > 0 and 0 in results and is_sequence(results[0]): return self.wrap_results_for_axis(results, res_index) # dict of scalars # the default dtype of an empty Series is `object`, but this # code can be hit by df.mean() where the result should have dtype # float64 even if it's an empty Series. 
constructor_sliced = self.obj._constructor_sliced if len(results) == 0 and constructor_sliced is Series: result = constructor_sliced(results, dtype=np.float64) else: result = constructor_sliced(results) result.index = res_index return result def apply_str(self) -> DataFrame | Series: # Caller is responsible for checking isinstance(self.f, str) # TODO: GH#39993 - Avoid special-casing by replacing with lambda if self.f == "size": # Special-cased because DataFrame.size returns a single scalar obj = self.obj value = obj.shape[self.axis] return obj._constructor_sliced(value, index=self.agg_axis) return super().apply_str() class FrameRowApply(FrameApply): axis: AxisInt = 0 def series_generator(self): return (self.obj._ixs(i, axis=1) for i in range(len(self.columns))) def result_index(self) -> Index: return self.columns def result_columns(self) -> Index: return self.index def wrap_results_for_axis( self, results: ResType, res_index: Index ) -> DataFrame | Series: """return the results for the rows""" if self.result_type == "reduce": # e.g. test_apply_dict GH#8735 res = self.obj._constructor_sliced(results) res.index = res_index return res elif self.result_type is None and all( isinstance(x, dict) for x in results.values() ): # Our operation was a to_dict op e.g. # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544 res = self.obj._constructor_sliced(results) res.index = res_index return res try: result = self.obj._constructor(data=results) except ValueError as err: if "All arrays must be of the same length" in str(err): # e.g. result = [[2, 3], [1.5], ['foo', 'bar']] # see test_agg_listlike_result GH#29587 res = self.obj._constructor_sliced(results) res.index = res_index return res else: raise if not isinstance(results[0], ABCSeries): if len(result.index) == len(self.res_columns): result.index = self.res_columns if len(result.columns) == len(res_index): result.columns = res_index return result class FrameColumnApply(FrameApply): axis: AxisInt = 1 def apply_broadcast(self, target: DataFrame) -> DataFrame: result = super().apply_broadcast(target.T) return result.T def series_generator(self): values = self.values values = ensure_wrapped_if_datetimelike(values) assert len(values) > 0 # We create one Series object, and will swap out the data inside # of it. Kids: don't do this at home. 
ser = self.obj._ixs(0, axis=0) mgr = ser._mgr if is_extension_array_dtype(ser.dtype): # values will be incorrect for this block # TODO(EA2D): special case would be unnecessary with 2D EAs obj = self.obj for i in range(len(obj)): yield obj._ixs(i, axis=0) else: for arr, name in zip(values, self.index): # GH#35462 re-pin mgr in case setitem changed it ser._mgr = mgr mgr.set_values(arr) object.__setattr__(ser, "_name", name) yield ser def result_index(self) -> Index: return self.index def result_columns(self) -> Index: return self.columns def wrap_results_for_axis( self, results: ResType, res_index: Index ) -> DataFrame | Series: """return the results for the columns""" result: DataFrame | Series # we have requested to expand if self.result_type == "expand": result = self.infer_to_same_shape(results, res_index) # we have a non-series and don't want inference elif not isinstance(results[0], ABCSeries): result = self.obj._constructor_sliced(results) result.index = res_index # we may want to infer results else: result = self.infer_to_same_shape(results, res_index) return result def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame: """infer the results to the same shape as the input object""" result = self.obj._constructor(data=results) result = result.T # set the index result.index = res_index # infer dtypes result = result.infer_objects(copy=False) return result Axis = Union[AxisInt, Literal["index", "columns", "rows"]] AggFuncType = Union[ AggFuncTypeBase, List[AggFuncTypeBase], AggFuncTypeDict, ] The provided code snippet includes necessary dependencies for implementing the `frame_apply` function. Write a Python function `def frame_apply( obj: DataFrame, func: AggFuncType, axis: Axis = 0, raw: bool = False, result_type: str | None = None, args=None, kwargs=None, ) -> FrameApply` to solve the following problem: construct and return a row or column based frame apply object Here is the function: def frame_apply( obj: DataFrame, func: AggFuncType, axis: Axis = 0, raw: bool = False, result_type: str | None = None, args=None, kwargs=None, ) -> FrameApply: """construct and return a row or column based frame apply object""" axis = obj._get_axis_number(axis) klass: type[FrameApply] if axis == 0: klass = FrameRowApply elif axis == 1: klass = FrameColumnApply return klass( obj, func, raw=raw, result_type=result_type, args=args, kwargs=kwargs, )
construct and return a row or column based frame apply object
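A brief usage sketch of the dispatch this constructor performs. The public ``DataFrame.apply`` entry point builds exactly this kind of apply object internally, with ``axis`` selecting the subclass; the data and the direct ``frame_apply`` call are illustrative, assuming the definitions above are in scope:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# axis=0 -> FrameRowApply: func receives one column Series at a time.
col_sums = df.apply(np.sum, axis=0)  # a -> 6, b -> 15

# axis=1 -> FrameColumnApply: func receives one row Series at a time.
row_sums = df.apply(np.sum, axis=1)  # 5, 7, 9

# Constructing the apply object directly gives the same result.
op = frame_apply(df, np.sum, axis=0)
assert op.apply().equals(col_sums)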
173,115
from __future__ import annotations import abc from collections import defaultdict from contextlib import nullcontext from functools import partial import inspect from typing import ( TYPE_CHECKING, Any, Callable, ContextManager, DefaultDict, Dict, Hashable, Iterable, Iterator, List, Sequence, cast, ) import numpy as np from pandas._config import option_context from pandas._libs import lib from pandas._typing import ( AggFuncType, AggFuncTypeBase, AggFuncTypeDict, AggObjType, Axis, AxisInt, NDFrameT, npt, ) from pandas.errors import SpecificationError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import is_nested_object from pandas.core.dtypes.common import ( is_dict_like, is_extension_array_dtype, is_list_like, is_sequence, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCNDFrame, ABCSeries, ) from pandas.core.algorithms import safe_sort from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike def is_multi_agg_with_relabel(**kwargs) -> bool: """ Check whether kwargs passed to .agg look like multi-agg with relabeling. Parameters ---------- **kwargs : dict Returns ------- bool Examples -------- >>> is_multi_agg_with_relabel(a="max") False >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min")) True >>> is_multi_agg_with_relabel() False """ return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and ( len(kwargs) > 0 ) def normalize_keyword_aggregation( kwargs: dict, ) -> tuple[dict, list[str], npt.NDArray[np.intp]]: """ Normalize user-provided "named aggregation" kwargs. Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs to the old Dict[str, List[scalar]]]. Parameters ---------- kwargs : dict Returns ------- aggspec : dict The transformed kwargs. columns : List[str] The user-provided keys. col_idx_order : List[int] List of columns indices. Examples -------- >>> normalize_keyword_aggregation({"output": ("input", "sum")}) (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0])) """ from pandas.core.indexes.base import Index # Normalize the aggregation functions as Mapping[column, List[func]], # process normally, then fixup the names. # TODO: aggspec type: typing.Dict[str, List[AggScalar]] # May be hitting https://github.com/python/mypy/issues/5958 # saying it doesn't have an attribute __name__ aggspec: DefaultDict = defaultdict(list) order = [] columns, pairs = list(zip(*kwargs.items())) for column, aggfunc in pairs: aggspec[column].append(aggfunc) order.append((column, com.get_callable_name(aggfunc) or aggfunc)) # uniquify aggfunc name if duplicated in order list uniquified_order = _make_unique_kwarg_list(order) # GH 25719, due to aggspec will change the order of assigned columns in aggregation # uniquified_aggspec will store uniquified order list and will compare it with order # based on index aggspec_order = [ (column, com.get_callable_name(aggfunc) or aggfunc) for column, aggfuncs in aggspec.items() for aggfunc in aggfuncs ] uniquified_aggspec = _make_unique_kwarg_list(aggspec_order) # get the new index of columns by comparison col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order) return aggspec, columns, col_idx_order AggFuncType = Union[ AggFuncTypeBase, List[AggFuncTypeBase], AggFuncTypeDict, ] class SpecificationError(Exception): """ Exception raised by ``agg`` when the functions are ill-specified. The exception raised in two scenarios. 
The first way is calling ``agg`` on a Dataframe or Series using a nested renamer (dict-of-dict). The second way is calling ``agg`` on a Dataframe with duplicated functions names without assigning column name. Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2], ... 'B': range(5), ... 'C': range(5)}) >>> df.groupby('A').B.agg({'foo': 'count'}) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported >>> df.groupby('A').agg({'B': {'foo': ['sum', 'max']}}) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported >>> df.groupby('A').agg(['min', 'min']) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported """ The provided code snippet includes necessary dependencies for implementing the `reconstruct_func` function. Write a Python function `def reconstruct_func( func: AggFuncType | None, **kwargs ) -> tuple[bool, AggFuncType | None, list[str] | None, npt.NDArray[np.intp] | None]` to solve the following problem: This is the internal function to reconstruct func given if there is relabeling or not and also normalize the keyword to get new order of columns. If named aggregation is applied, `func` will be None, and kwargs contains the column and aggregation function information to be parsed; If named aggregation is not applied, `func` is either string (e.g. 'min') or Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}) If relabeling is True, will return relabeling, reconstructed func, column names, and the reconstructed order of columns. If relabeling is False, the columns and order will be None. Parameters ---------- func: agg function (e.g. 'min' or Callable) or list of agg functions (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}). **kwargs: dict, kwargs used in is_multi_agg_with_relabel and normalize_keyword_aggregation function for relabelling Returns ------- relabelling: bool, if there is relabelling or not func: normalized and mangled func columns: list of column names order: array of columns indices Examples -------- >>> reconstruct_func(None, **{"foo": ("col", "min")}) (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0])) >>> reconstruct_func("min") (False, 'min', None, None) Here is the function: def reconstruct_func( func: AggFuncType | None, **kwargs ) -> tuple[bool, AggFuncType | None, list[str] | None, npt.NDArray[np.intp] | None]: """ This is the internal function to reconstruct func given if there is relabeling or not and also normalize the keyword to get new order of columns. If named aggregation is applied, `func` will be None, and kwargs contains the column and aggregation function information to be parsed; If named aggregation is not applied, `func` is either string (e.g. 'min') or Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}) If relabeling is True, will return relabeling, reconstructed func, column names, and the reconstructed order of columns. If relabeling is False, the columns and order will be None. Parameters ---------- func: agg function (e.g. 'min' or Callable) or list of agg functions (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}). 
    **kwargs: dict, kwargs used in is_multi_agg_with_relabel and
        normalize_keyword_aggregation function for relabelling

    Returns
    -------
    relabelling: bool, if there is relabelling or not
    func: normalized and mangled func
    columns: list of column names
    order: array of columns indices

    Examples
    --------
    >>> reconstruct_func(None, **{"foo": ("col", "min")})
    (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))

    >>> reconstruct_func("min")
    (False, 'min', None, None)
    """
    relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
    columns: list[str] | None = None
    order: npt.NDArray[np.intp] | None = None

    if not relabeling:
        if isinstance(func, list) and len(func) > len(set(func)):
            # GH 28426 will raise error if duplicated function names are used and
            # there is no reassigned name
            raise SpecificationError(
                "Function names must be unique if there are no new column names "
                "assigned"
            )
        if func is None:
            # nicer error message
            raise TypeError("Must provide 'func' or tuples of '(column, aggfunc)'.")

    if relabeling:
        func, columns, order = normalize_keyword_aggregation(kwargs)

    return relabeling, func, columns, order
This is the internal function to reconstruct func given if there is relabeling or not and also normalize the keyword to get new order of columns. If named aggregation is applied, `func` will be None, and kwargs contains the column and aggregation function information to be parsed; If named aggregation is not applied, `func` is either string (e.g. 'min') or Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}) If relabeling is True, will return relabeling, reconstructed func, column names, and the reconstructed order of columns. If relabeling is False, the columns and order will be None. Parameters ---------- func: agg function (e.g. 'min' or Callable) or list of agg functions (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}). **kwargs: dict, kwargs used in is_multi_agg_with_relabel and normalize_keyword_aggregation function for relabelling Returns ------- relabelling: bool, if there is relabelling or not func: normalized and mangled func columns: list of column names order: array of columns indices Examples -------- >>> reconstruct_func(None, **{"foo": ("col", "min")}) (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0])) >>> reconstruct_func("min") (False, 'min', None, None)
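A short usage sketch, assuming ``reconstruct_func`` and its helpers above are in scope; the same normalization is what backs the public named-aggregation API, and the column names here are illustrative:

import pandas as pd

# Named aggregation: func is None and every kwarg is a (column, aggfunc)
# 2-tuple, so this is detected as relabeling and unpacked into a spec.
relabeling, func, columns, order = reconstruct_func(
    None, low=("col", "min"), high=("col", "max")
)
assert relabeling and columns == ("low", "high")
assert dict(func) == {"col": ["min", "max"]}

# The public API path that exercises this normalization:
df = pd.DataFrame({"grp": ["x", "x", "y"], "col": [1, 2, 3]})
out = df.groupby("grp").agg(low=("col", "min"), high=("col", "max"))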
173,116
from __future__ import annotations import abc from collections import defaultdict from contextlib import nullcontext from functools import partial import inspect from typing import ( TYPE_CHECKING, Any, Callable, ContextManager, DefaultDict, Dict, Hashable, Iterable, Iterator, List, Sequence, cast, ) import numpy as np from pandas._config import option_context from pandas._libs import lib from pandas._typing import ( AggFuncType, AggFuncTypeBase, AggFuncTypeDict, AggObjType, Axis, AxisInt, NDFrameT, npt, ) from pandas.errors import SpecificationError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import is_nested_object from pandas.core.dtypes.common import ( is_dict_like, is_extension_array_dtype, is_list_like, is_sequence, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCNDFrame, ABCSeries, ) from pandas.core.algorithms import safe_sort from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. 
Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. 
_no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. 
data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. # See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. 
""" result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. """ result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. 
""" self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. 
""" return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. 
""" def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. 
""" name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). """ attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. 
""" header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name is reused. 
>>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead." ) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. 
Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. 
>>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. 
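        Examples
        --------
        Illustrative only; for a flat Index any integer level other than 0
        (or -1) raises:

        >>> idx = pd.Index(['a', 'b', 'c'])
        >>> idx._validate_index_level(0)
        >>> idx._validate_index_level(1)
        Traceback (most recent call last):
        ...
        IndexError: Too many levels: Index has only 1 level, not 2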
""" if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or" "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name. """ if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." 
) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. 
        Examples
        --------
        >>> idx = pd.Index([1, 5, 7, 7])
        >>> idx.is_unique
        False

        >>> idx = pd.Index([1, 5, 7])
        >>> idx.is_unique
        True

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.is_unique
        False

        >>> idx = pd.Index(["Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.is_unique
        True
        """
        return self._engine.is_unique

    def has_duplicates(self) -> bool:
        """
        Check if the Index has duplicate values.

        Returns
        -------
        bool
            Whether or not the Index has duplicate values.

        See Also
        --------
        Index.is_unique : Inverse method that checks if it has unique values.

        Examples
        --------
        >>> idx = pd.Index([1, 5, 7, 7])
        >>> idx.has_duplicates
        True

        >>> idx = pd.Index([1, 5, 7])
        >>> idx.has_duplicates
        False

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.has_duplicates
        True

        >>> idx = pd.Index(["Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.has_duplicates
        False
        """
        return not self.is_unique

    def is_boolean(self) -> bool:
        """
        Check if the Index only consists of booleans.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_bool_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of booleans.

        See Also
        --------
        is_integer : Check if the Index only consists of integers (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data.
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([True, False, True])
        >>> idx.is_boolean()  # doctest: +SKIP
        True

        >>> idx = pd.Index(["True", "False", "True"])
        >>> idx.is_boolean()  # doctest: +SKIP
        False

        >>> idx = pd.Index([True, False, "True"])
        >>> idx.is_boolean()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_boolean is deprecated. "
            "Use pandas.api.types.is_bool_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["boolean"]

    def is_integer(self) -> bool:
        """
        Check if the Index only consists of integers.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_integer_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of integers.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx.is_integer()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
        >>> idx.is_integer()  # doctest: +SKIP
        False

        >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
        >>> idx.is_integer()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_integer is deprecated. "
            "Use pandas.api.types.is_integer_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["integer"]

    def is_floating(self) -> bool:
        """
        Check if the Index is a floating type.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_float_dtype` instead.

        The Index may consist of only floats, NaNs, or a mix of floats,
        integers, or NaNs.
Returns ------- bool Whether or not the Index only consists of only consists of floats, NaNs, or a mix of floats, integers, or NaNs. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4, np.nan]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_floating() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_floating is deprecated. " "Use pandas.api.types.is_float_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"] def is_numeric(self) -> bool: """ Check if the Index only consists of numeric data. .. deprecated:: 2.0.0 Use `pandas.api.types.is_numeric_dtype` instead. Returns ------- bool Whether or not the Index only consists of numeric data. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"]) >>> idx.is_numeric() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_numeric is deprecated. " "Use pandas.api.types.is_any_real_numeric_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer", "floating"] def is_object(self) -> bool: """ Check if the Index is of the object dtype. .. deprecated:: 2.0.0 Use `pandas.api.types.is_object_dtype` instead. Returns ------- bool Whether or not the Index is of the object dtype. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Apple", "Mango", 2.0]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... 
"Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. " "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. 
""" if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. 
Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-likes. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance, this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate an pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed. 
The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. 
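        Examples
        --------
        A rough sketch of the expected behavior (timezones chosen only for
        illustration):

        >>> left = pd.date_range("2020-01-01", periods=2, tz="US/Eastern")
        >>> right = pd.date_range("2020-01-01", periods=2, tz="Europe/Berlin")
        >>> new_left, new_right = left._dti_setop_align_tzs(right, "union")
        >>> str(new_left.dtype), str(new_right.dtype)
        ('datetime64[ns, UTC]', 'datetime64[ns, UTC]')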
""" # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. 
Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). 
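For example, with the default ``sort=False`` the common values are returned in the order in which they appear in the calling index:

>>> pd.Index([4, 3, 2, 1]).intersection(pd.Index([2, 3, 5]))
Index([3, 2], dtype='int64')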
Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
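For example, duplicated entries in either input appear only once in the result (shown via the public method, which uses this helper when the inputs are not both monotonic increasing):

>>> pd.Index([3, 1, 1, 2]).intersection(pd.Index([1, 2, 2]))
Index([1, 2], dtype='int64')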
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
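These are the ``method`` values accepted by ``get_indexer`` and ``reindex``; for example, on a monotonic numeric Index:

>>> idx = pd.Index([0, 10, 20])
>>> idx.get_indexer([4, 16], method="pad")
array([0, 1])
>>> idx.get_indexer([4, 16], method="nearest")
array([0, 2])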
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
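# i.e. joining a tz-naive DatetimeIndex with a tz-aware one is rejected up front rather than silently falling back to an object-dtype join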
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
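# (only needed when the mask actually dropped rows; mask.nonzero() maps positions in the masked frame back to positions in the original frame)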
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
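Notes
-----
``other`` may also be array-like, in which case the replacement values are taken elementwise at the positions where ``cond`` is False:

>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.where(idx > 2, pd.Index([10, 20, 30, 40]))
Index([10, 20, 3, 4], dtype='int64')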
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
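# _simple_new assumes the values are already validated, so it skips the dtype inference and checks done by the public Index constructor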
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- np.ndarray[np.intp] Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ # This works for either ndarray or EA, is overridden # by RangeIndex, MultIIndex return self._data.argsort(*args, **kwargs) def _check_indexing_error(self, key): if not is_scalar(key): # if key is not a scalar, directly raise an error (the code below # would convert to numpy arrays and raise later any way) - GH29926 raise InvalidIndexError(key) def _should_fallback_to_positional(self) -> bool: """ Should an integer key be treated as positional? """ return self.inferred_type not in { "integer", "mixed-integer", "floating", "complex", } _index_shared_docs[ "get_indexer_non_unique" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s Returns ------- indexer : np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. missing : np.ndarray[np.intp] An indexer into the target of the values not found. These correspond to the -1 in the indexer array. Examples -------- >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['b', 'b']) (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64)) In the example below there are no matched values. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['q', 'r', 't']) (array([-1, -1, -1]), array([0, 1, 2])) For this reason, the returned ``indexer`` contains only integers equal to -1. It demonstrates that there's no match between the index and the ``target`` values at these positions. The mask [0, 1, 2] in the return value shows that the first, second, and third elements are missing. Notice that the return value is a tuple contains two items. In the example below the first item is an array of locations in ``index``. The second item is a mask shows that the first and third elements are missing. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['f', 'b', 's']) (array([-1, 1, 3, 4, -1]), array([0, 2])) """ def get_indexer_non_unique( self, target ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) target = self._maybe_cast_listlike_indexer(target) if not self._should_compare(target) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. return self._get_indexer_non_comparable(target, method=None, unique=False) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer_non_unique(ptarget) if not is_dtype_equal(self.dtype, target.dtype): # TODO: if object, could use infer_dtype to preempt costly # conversion if still non-comparable? 
dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) that = target.astype(dtype, copy=False) return this.get_indexer_non_unique(that) # TODO: get_indexer has fastpaths for both Categorical-self and # Categorical-target. Can we do something similar here? # Note: _maybe_promote ensures we never get here with MultiIndex # self and non-Multi target tgt_values = target._get_engine_target() if self._is_multi and target._is_multi: engine = self._engine # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has # no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr] indexer, missing = self._engine.get_indexer_non_unique(tgt_values) return ensure_platform_int(indexer), ensure_platform_int(missing) def get_indexer_for(self, target) -> npt.NDArray[np.intp]: """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. Returns ------- np.ndarray[np.intp] List of indices. Examples -------- >>> idx = pd.Index([np.nan, 'var1', np.nan]) >>> idx.get_indexer_for([np.nan]) array([0, 2]) """ if self._index_as_unique: return self.get_indexer(target) indexer, _ = self.get_indexer_non_unique(target) return indexer def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: """ Analogue to get_indexer that raises if any elements are missing. """ keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if self._index_as_unique: indexer = self.get_indexer_for(keyarr) keyarr = self.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr) self._raise_if_missing(keyarr, indexer, axis_name) keyarr = self.take(indexer) if isinstance(key, Index): # GH 42790 - Preserve name from an Index keyarr.name = key.name if keyarr.dtype.kind in ["m", "M"]: # DTI/TDI.take can infer a freq in some cases when we dont want one if isinstance(key, list) or ( isinstance(key, type(self)) # "Index" has no attribute "freq" and key.freq is None # type: ignore[attr-defined] ): keyarr = keyarr._with_freq(None) return keyarr, indexer def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: # TODO: remove special-case; this is just to keep exception # message tests from raising while debugging use_interval_msg = is_interval_dtype(self.dtype) or ( is_categorical_dtype(self.dtype) # "Index" has no attribute "categories" [attr-defined] and is_interval_dtype( self.categories.dtype # type: ignore[attr-defined] ) ) if nmissing == len(indexer): if use_interval_msg: key = list(key) raise KeyError(f"None of [{key}] are in the [{axis_name}]") not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f"{not_found} not in index") def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[True] = ... ) -> npt.NDArray[np.intp]: ... 
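# NOTE: the stub definition above and the two stubs below are typing overloads;
# in the pandas source each of the three stubs carries an @overload decorator
# and the fourth, real implementation is marked @final. Those decorators were
# likely lost when this code was extracted, which is why
# _get_indexer_non_comparable appears to be defined four times in a row.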
def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None. """ if method is not None: other = _unpack_nested_dtype(target) raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: # This is for get_indexer return no_matches else: # This is for get_indexer_non_unique missing = np.arange(len(target), dtype=np.intp) return no_matches, missing def _index_as_unique(self) -> bool: """ Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat. """ return self.is_unique _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" def _maybe_promote(self, other: Index) -> tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. """ if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if ( self.tz is not None and other.tz is not None and not tz_compare(self.tz, other.tz) ): # standardize on UTC return self.tz_convert("UTC"), other.tz_convert("UTC") elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): try: return type(other)(self), other except OutOfBoundsDatetime: return self, other elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): # TODO: we dont have tests that get here return type(other)(self), other elif self.dtype.kind == "u" and other.dtype.kind == "i": # GH#41873 if other.min() >= 0: # lookup min as it may be cached # TODO: may need itemsize check if we have non-64-bit Indexes return self, other.astype(self.dtype) elif self._is_multi and not other._is_multi: try: # "Type[Index]" has no attribute "from_tuples" other = type(self).from_tuples(other) # type: ignore[attr-defined] except (TypeError, ValueError): # let's instead try with a straight Index self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): # Reverse op so we dont need to re-implement on the subclasses other, self = other._maybe_promote(self) return self, other def _find_common_type_compat(self, target) -> DtypeObj: """ Implementation of find_common_type that adjusts for Index-specific special cases. 
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. 
step : int, default None Returns ------- slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples -------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3, None) """ start_slice, end_slice = self.slice_locs(start, end, step=step) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step) def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ return key def _maybe_cast_listlike_indexer(self, target) -> Index: """ Analogue to maybe_cast_indexer for get_indexer instead of get_loc. """ return ensure_index(target) def _validate_indexer(self, form: str_t, key, kind: str_t) -> None: """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ assert kind in ["getitem", "iloc"] if key is not None and not is_integer(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ # We are a plain index here (sub-class override this method if they # wish to have special treatment for floats/ints, e.g. datetimelike Indexes if is_numeric_dtype(self.dtype): return self._maybe_cast_indexer(label) # reject them, if index does not contain label if (is_float(label) or is_integer(label)) and label not in self: self._raise_invalid_indexer("slice", label) return label def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: # np.searchsorted expects ascending sort order, have to reverse # everything for it to work (element ordering, search side and # resulting value). pos = self[::-1].searchsorted( label, side="right" if side == "left" else "left" ) return len(self) - pos raise ValueError("index must be monotonic increasing or decreasing") def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} Returns ------- int Index of label. """ if side not in ("left", "right"): raise ValueError( "Invalid value for side kwarg, must be either " f"'left' or 'right': {side}" ) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. 
label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " f"label: {repr(original_label)}" ) if isinstance(slc, slice): if side == "left": return slc.start else: return slc.stop else: if side == "right": return slc + 1 else: return slc def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, defaults None If None, defaults to 1. Returns ------- tuple[int, int] See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples -------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = step is None or step >= 0 if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice def delete(self: _IndexT, loc) -> _IndexT: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) The provided code snippet includes necessary dependencies for implementing the `relabel_result` function. Write a Python function `def relabel_result( result: DataFrame | Series, func: dict[str, list[Callable | str]], columns: Iterable[Hashable], order: Iterable[int], ) -> dict[Hashable, Series]` to solve the following problem: Internal function to reorder result if relabelling is True for dataframe.agg, and return the reordered result in dict. Parameters: ---------- result: Result from aggregation func: Dict of (column name, funcs) columns: New columns name for relabelling order: New order for relabelling Examples: --------- >>> result = DataFrame({"A": [np.nan, 2, np.nan], ... 
"C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]} >>> columns = ("foo", "aab", "bar", "dat") >>> order = [0, 1, 2, 3] >>> _relabel_result(result, func, columns, order) # doctest: +SKIP dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]), C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]), B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"])) Here is the function: def relabel_result( result: DataFrame | Series, func: dict[str, list[Callable | str]], columns: Iterable[Hashable], order: Iterable[int], ) -> dict[Hashable, Series]: """ Internal function to reorder result if relabelling is True for dataframe.agg, and return the reordered result in dict. Parameters: ---------- result: Result from aggregation func: Dict of (column name, funcs) columns: New columns name for relabelling order: New order for relabelling Examples: --------- >>> result = DataFrame({"A": [np.nan, 2, np.nan], ... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]} >>> columns = ("foo", "aab", "bar", "dat") >>> order = [0, 1, 2, 3] >>> _relabel_result(result, func, columns, order) # doctest: +SKIP dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]), C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]), B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"])) """ from pandas.core.indexes.base import Index reordered_indexes = [ pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1]) ] reordered_result_in_dict: dict[Hashable, Series] = {} idx = 0 reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1 for col, fun in func.items(): s = result[col].dropna() # In the `_aggregate`, the callable names are obtained and used in `result`, and # these names are ordered alphabetically. e.g. # C2 C1 # <lambda> 1 NaN # amax NaN 4.0 # max NaN 4.0 # sum 18.0 6.0 # Therefore, the order of functions for each column could be shuffled # accordingly so need to get the callable name if it is not parsed names, and # reorder the aggregated result for each column. # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is # [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to # reorder so that aggregated values map to their functions regarding the order. # However there is only one column being used for aggregation, not need to # reorder since the index is not sorted, and keep as is in `funcs`, e.g. # A # min 1.0 # mean 1.5 # mean 1.5 if reorder_mask: fun = [ com.get_callable_name(f) if not isinstance(f, str) else f for f in fun ] col_idx_order = Index(s.index).get_indexer(fun) s = s[col_idx_order] # assign the new user-provided "named aggregation" as index names, and reindex # it based on the whole user-provided names. s.index = reordered_indexes[idx : idx + len(fun)] reordered_result_in_dict[col] = s.reindex(columns, copy=False) idx = idx + len(fun) return reordered_result_in_dict
Internal function to reorder result if relabelling is True for dataframe.agg, and return the reordered result in dict. Parameters ---------- result: Result from aggregation func: Dict of (column name, funcs) columns: New column names for relabelling order: New order for relabelling Examples -------- >>> result = DataFrame({"A": [np.nan, 2, np.nan], ... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]} >>> columns = ("foo", "aab", "bar", "dat") >>> order = [0, 1, 2, 3] >>> relabel_result(result, funcs, columns, order) # doctest: +SKIP dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]), C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]), B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"]))
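A runnable sketch of the helper above, assuming it can be imported as pandas.core.apply.relabel_result (its home in recent pandas; internal APIs can move between versions). The fixture mirrors the docstring example, but the row index is made consistent with how DataFrame.agg labels its intermediate result, so the lookup by function name actually resolves.

import numpy as np
import pandas as pd
from pandas.core.apply import relabel_result  # internal API; path may vary by version

# Intermediate agg result: rows are the (alphabetically sorted) aggregation
# names, columns are the original DataFrame columns.
result = pd.DataFrame(
    {"A": [2.0, np.nan, np.nan],
     "C": [6.0, np.nan, np.nan],
     "B": [np.nan, 4.0, 2.5]},
    index=["max", "mean", "min"],
)
funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
columns = ("foo", "aab", "bar", "dat")  # user-provided relabelled names
order = [0, 1, 2, 3]                    # positions of those names in the output

relabelled = relabel_result(result, funcs, columns, order)
for col, ser in relabelled.items():
    # Every Series is reindexed on the full ("foo", "aab", "bar", "dat") axis:
    # A carries 2.0 at "foo", C carries 6.0 at "aab", B carries 4.0/2.5 at
    # "bar"/"dat", with NaN everywhere else.
    print(col, ser.to_dict())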
173,117
from __future__ import annotations import abc from collections import defaultdict from contextlib import nullcontext from functools import partial import inspect from typing import ( TYPE_CHECKING, Any, Callable, ContextManager, DefaultDict, Dict, Hashable, Iterable, Iterator, List, Sequence, cast, ) import numpy as np from pandas._config import option_context from pandas._libs import lib from pandas._typing import ( AggFuncType, AggFuncTypeBase, AggFuncTypeDict, AggObjType, Axis, AxisInt, NDFrameT, npt, ) from pandas.errors import SpecificationError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import is_nested_object from pandas.core.dtypes.common import ( is_dict_like, is_extension_array_dtype, is_list_like, is_sequence, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCNDFrame, ABCSeries, ) from pandas.core.algorithms import safe_sort from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]: """ Possibly mangle a list of aggfuncs. Parameters ---------- aggfuncs : Sequence Returns ------- mangled: list-like A new AggSpec sequence, where lambdas have been converted to have unique names. Notes ----- If just one aggfunc is passed, the name will not be mangled. """ if len(aggfuncs) <= 1: # don't mangle for .agg([lambda x: .]) return aggfuncs i = 0 mangled_aggfuncs = [] for aggfunc in aggfuncs: if com.get_callable_name(aggfunc) == "<lambda>": aggfunc = partial(aggfunc) aggfunc.__name__ = f"<lambda_{i}>" i += 1 mangled_aggfuncs.append(aggfunc) return mangled_aggfuncs Any = object() The provided code snippet includes necessary dependencies for implementing the `maybe_mangle_lambdas` function. Write a Python function `def maybe_mangle_lambdas(agg_spec: Any) -> Any` to solve the following problem: Make new lambdas with unique names. Parameters ---------- agg_spec : Any An argument to GroupBy.agg. Non-dict-like `agg_spec` are pass through as is. For dict-like `agg_spec` a new spec is returned with name-mangled lambdas. Returns ------- mangled : Any Same type as the input. Examples -------- >>> maybe_mangle_lambdas('sum') 'sum' >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP [<function __main__.<lambda_0>, <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>] Here is the function: def maybe_mangle_lambdas(agg_spec: Any) -> Any: """ Make new lambdas with unique names. Parameters ---------- agg_spec : Any An argument to GroupBy.agg. Non-dict-like `agg_spec` are pass through as is. For dict-like `agg_spec` a new spec is returned with name-mangled lambdas. Returns ------- mangled : Any Same type as the input. Examples -------- >>> maybe_mangle_lambdas('sum') 'sum' >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP [<function __main__.<lambda_0>, <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>] """ is_dict = is_dict_like(agg_spec) if not (is_dict or is_list_like(agg_spec)): return agg_spec mangled_aggspec = type(agg_spec)() # dict or OrderedDict if is_dict: for key, aggfuncs in agg_spec.items(): if is_list_like(aggfuncs) and not is_dict_like(aggfuncs): mangled_aggfuncs = _managle_lambda_list(aggfuncs) else: mangled_aggfuncs = aggfuncs mangled_aggspec[key] = mangled_aggfuncs else: mangled_aggspec = _managle_lambda_list(agg_spec) return mangled_aggspec
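A short demonstration of the mangling behaviour, assuming the function is importable from pandas.core.apply (an internal module; the location may differ across pandas versions). It shows that a list of lambdas gets unique names while other specs pass through untouched.

from pandas.core.apply import maybe_mangle_lambdas  # internal API

spec = {"height": [lambda x: x.min(), lambda x: x.max()], "weight": "sum"}
mangled = maybe_mangle_lambdas(spec)

# Each lambda in the list now has a unique __name__, so DataFrame.agg can
# produce distinct result labels; non-list entries are left as they are.
print([f.__name__ for f in mangled["height"]])  # ['<lambda_0>', '<lambda_1>']
print(mangled["weight"])                        # 'sum'
print(maybe_mangle_lambdas("sum"))              # non-dict, non-list: returned as is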
Make new lambdas with unique names. Parameters ---------- agg_spec : Any An argument to GroupBy.agg. Non-dict-like `agg_spec` is passed through as is. For dict-like `agg_spec` a new spec is returned with name-mangled lambdas. Returns ------- mangled : Any Same type as the input. Examples -------- >>> maybe_mangle_lambdas('sum') 'sum' >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP [<function __main__.<lambda_0>, <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
173,118
from __future__ import annotations import abc from collections import defaultdict from contextlib import nullcontext from functools import partial import inspect from typing import ( TYPE_CHECKING, Any, Callable, ContextManager, DefaultDict, Dict, Hashable, Iterable, Iterator, List, Sequence, cast, ) import numpy as np from pandas._config import option_context from pandas._libs import lib from pandas._typing import ( AggFuncType, AggFuncTypeBase, AggFuncTypeDict, AggObjType, Axis, AxisInt, NDFrameT, npt, ) from pandas.errors import SpecificationError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import is_nested_object from pandas.core.dtypes.common import ( is_dict_like, is_extension_array_dtype, is_list_like, is_sequence, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCNDFrame, ABCSeries, ) from pandas.core.algorithms import safe_sort from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike Any = object() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) The provided code snippet includes necessary dependencies for implementing the `validate_func_kwargs` function. Write a Python function `def validate_func_kwargs( kwargs: dict, ) -> tuple[list[str], list[str | Callable[..., Any]]]` to solve the following problem: Validates types of user-provided "named aggregation" kwargs. `TypeError` is raised if aggfunc is not `str` or callable. Parameters ---------- kwargs : dict Returns ------- columns : List[str] List of user-provied keys. func : List[Union[str, callable[...,Any]]] List of user-provided aggfuncs Examples -------- >>> validate_func_kwargs({'one': 'min', 'two': 'max'}) (['one', 'two'], ['min', 'max']) Here is the function: def validate_func_kwargs( kwargs: dict, ) -> tuple[list[str], list[str | Callable[..., Any]]]: """ Validates types of user-provided "named aggregation" kwargs. `TypeError` is raised if aggfunc is not `str` or callable. Parameters ---------- kwargs : dict Returns ------- columns : List[str] List of user-provied keys. func : List[Union[str, callable[...,Any]]] List of user-provided aggfuncs Examples -------- >>> validate_func_kwargs({'one': 'min', 'two': 'max'}) (['one', 'two'], ['min', 'max']) """ tuple_given_message = "func is expected but received {} in **kwargs." columns = list(kwargs) func = [] for col_func in kwargs.values(): if not (isinstance(col_func, str) or callable(col_func)): raise TypeError(tuple_given_message.format(type(col_func).__name__)) func.append(col_func) if not columns: no_arg_message = "Must provide 'func' or named aggregation **kwargs." raise TypeError(no_arg_message) return columns, func
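For reference, a quick sketch of the validation in action, again under the assumption that the internal pandas.core.apply import path is available. The kwargs have the {output_name: aggfunc} shape used by named aggregation, e.g. ser.groupby(...).agg(low="min", high=max).

from pandas.core.apply import validate_func_kwargs  # internal API

columns, funcs = validate_func_kwargs({"low": "min", "high": max})
print(columns)  # ['low', 'high']
print(funcs)    # ['min', <built-in function max>]

# Anything that is neither a str nor callable is rejected up front.
try:
    validate_func_kwargs({"bad": 123})
except TypeError as err:
    print(err)  # func is expected but received int in **kwargs.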
Validates types of user-provided "named aggregation" kwargs. `TypeError` is raised if aggfunc is not `str` or callable. Parameters ---------- kwargs : dict Returns ------- columns : List[str] List of user-provided keys. func : List[Union[str, callable[...,Any]]] List of user-provided aggfuncs Examples -------- >>> validate_func_kwargs({'one': 'min', 'two': 'max'}) (['one', 'two'], ['min', 'max'])
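A short sketch exercising `validate_func_kwargs` exactly as defined above (no particular pandas import path is assumed):

cols, funcs = validate_func_kwargs({"one": "min", "two": lambda s: s.max()})
print(cols)                           # ['one', 'two']
print(funcs[0], callable(funcs[1]))   # min True

try:
    validate_func_kwargs({"one": ("col", "min")})   # a tuple is neither str nor callable
except TypeError as err:
    print(err)   # func is expected but received tuple in **kwargs.

try:
    validate_func_kwargs({})                        # empty kwargs are rejected
except TypeError as err:
    print(err)   # Must provide 'func' or named aggregation **kwargs.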
173,119
from __future__ import annotations import operator import numpy as np The provided code snippet includes necessary dependencies for implementing the `make_invalid_op` function. Write a Python function `def make_invalid_op(name: str)` to solve the following problem: Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function Here is the function: def make_invalid_op(name: str): """ Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function """ def invalid_op(self, other=None): typ = type(self).__name__ raise TypeError(f"cannot perform {name} with this index type: {typ}") invalid_op.__name__ = name return invalid_op
Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function
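A minimal sketch of how the generated method behaves, using a throwaway class rather than a real pandas index type:

class DummyIndex:
    # pin the generated method as the dunder so the operator itself raises
    __add__ = make_invalid_op("__add__")

try:
    DummyIndex() + 1
except TypeError as err:
    print(err)   # cannot perform __add__ with this index type: DummyIndex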
173,120
from __future__ import annotations _op_descriptions: dict[str, dict[str, str | None]] = { # Arithmetic Operators "add": { "op": "+", "desc": "Addition", "reverse": "radd", "series_examples": _add_example_SERIES, "series_returns": _returns_series, }, "sub": { "op": "-", "desc": "Subtraction", "reverse": "rsub", "series_examples": _sub_example_SERIES, "series_returns": _returns_series, }, "mul": { "op": "*", "desc": "Multiplication", "reverse": "rmul", "series_examples": _mul_example_SERIES, "series_returns": _returns_series, "df_examples": None, }, "mod": { "op": "%", "desc": "Modulo", "reverse": "rmod", "series_examples": _mod_example_SERIES, "series_returns": _returns_series, }, "pow": { "op": "**", "desc": "Exponential power", "reverse": "rpow", "series_examples": _pow_example_SERIES, "series_returns": _returns_series, "df_examples": None, }, "truediv": { "op": "/", "desc": "Floating division", "reverse": "rtruediv", "series_examples": _div_example_SERIES, "series_returns": _returns_series, "df_examples": None, }, "floordiv": { "op": "//", "desc": "Integer division", "reverse": "rfloordiv", "series_examples": _floordiv_example_SERIES, "series_returns": _returns_series, "df_examples": None, }, "divmod": { "op": "divmod", "desc": "Integer division and modulo", "reverse": "rdivmod", "series_examples": _divmod_example_SERIES, "series_returns": _returns_tuple, "df_examples": None, }, # Comparison Operators "eq": { "op": "==", "desc": "Equal to", "reverse": None, "series_examples": _eq_example_SERIES, "series_returns": _returns_series, }, "ne": { "op": "!=", "desc": "Not equal to", "reverse": None, "series_examples": _ne_example_SERIES, "series_returns": _returns_series, }, "lt": { "op": "<", "desc": "Less than", "reverse": None, "series_examples": _lt_example_SERIES, "series_returns": _returns_series, }, "le": { "op": "<=", "desc": "Less than or equal to", "reverse": None, "series_examples": _le_example_SERIES, "series_returns": _returns_series, }, "gt": { "op": ">", "desc": "Greater than", "reverse": None, "series_examples": _gt_example_SERIES, "series_returns": _returns_series, }, "ge": { "op": ">=", "desc": "Greater than or equal to", "reverse": None, "series_examples": _ge_example_SERIES, "series_returns": _returns_series, }, } _flex_doc_SERIES = """ Return {desc} of series and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in either one of the inputs. Parameters ---------- other : Series or scalar value level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result of filling (at that location) will be missing. axis : {{0 or 'index'}} Unused. Parameter needed for compatibility with DataFrame. Returns ------- {series_returns} """ _see_also_reverse_SERIES = """ See Also -------- Series.{reverse} : {see_also_desc}. """ _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. 
Parameters ---------- other : scalar, sequence, Series, dict or DataFrame Any single or multiple element data structure, or list-like object. axis : {{0 or 'index', 1 or 'columns'}} Whether to compare by the index (0 or 'index') or columns. (1 or 'columns'). For Series input, axis to match Series index on. level : int or label Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : float or None, default None Fill existing missing (NaN) values, and any new element needed for successful DataFrame alignment, with this value before computation. If data in both corresponding DataFrame locations is missing the result will be missing. Returns ------- DataFrame Result of the arithmetic operation. See Also -------- DataFrame.add : Add DataFrames. DataFrame.sub : Subtract DataFrames. DataFrame.mul : Multiply DataFrames. DataFrame.div : Divide DataFrames (float division). DataFrame.truediv : Divide DataFrames (float division). DataFrame.floordiv : Divide DataFrames (integer division). DataFrame.mod : Calculate modulo (remainder after division). DataFrame.pow : Calculate exponential power. Notes ----- Mismatched indices will be unioned together. Examples -------- >>> df = pd.DataFrame({{'angles': [0, 3, 4], ... 'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide by constant with reverse version. >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract a list and Series by axis with operator version. >>> df - [1, 2] angles degrees circle -1 358 triangle 2 178 rectangle 3 358 >>> df.sub([1, 2], axis='columns') angles degrees circle -1 358 triangle 2 178 rectangle 3 358 >>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']), ... axis='index') angles degrees circle -1 359 triangle 2 179 rectangle 3 359 Multiply a dictionary by axis. >>> df.mul({{'angles': 0, 'degrees': 2}}) angles degrees circle 0 720 triangle 0 360 rectangle 0 720 >>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index') angles degrees circle 0 0 triangle 6 360 rectangle 12 1080 Multiply a DataFrame of different shape with operator version. >>> other = pd.DataFrame({{'angles': [0, 3, 4]}}, ... index=['circle', 'triangle', 'rectangle']) >>> other angles circle 0 triangle 3 rectangle 4 >>> df * other angles degrees circle 0 NaN triangle 9 NaN rectangle 16 NaN >>> df.mul(other, fill_value=0) angles degrees circle 0 0.0 triangle 9 0.0 rectangle 16 0.0 Divide by a MultiIndex by level. >>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6], ... 'degrees': [360, 180, 360, 360, 540, 720]}}, ... index=[['A', 'A', 'A', 'B', 'B', 'B'], ... ['circle', 'triangle', 'rectangle', ... 'square', 'pentagon', 'hexagon']]) >>> df_multindex angles degrees A circle 0 360 triangle 3 180 rectangle 4 360 B square 4 360 pentagon 5 540 hexagon 6 720 >>> df.div(df_multindex, level=1, fill_value=0) angles degrees A circle NaN 1.0 triangle 1.0 1.0 rectangle 1.0 1.0 B square 0.0 0.0 pentagon 0.0 0.0 hexagon 0.0 0.0 """ The provided code snippet includes necessary dependencies for implementing the `make_flex_doc` function. 
Write a Python function `def make_flex_doc(op_name: str, typ: str) -> str` to solve the following problem: Make the appropriate substitutions for the given operation and class-typ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring to attach to a generated method. Parameters ---------- op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} typ : str {series, 'dataframe']} Returns ------- doc : str Here is the function: def make_flex_doc(op_name: str, typ: str) -> str: """ Make the appropriate substitutions for the given operation and class-typ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring to attach to a generated method. Parameters ---------- op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} typ : str {series, 'dataframe']} Returns ------- doc : str """ op_name = op_name.replace("__", "") op_desc = _op_descriptions[op_name] op_desc_op = op_desc["op"] assert op_desc_op is not None # for mypy if op_name.startswith("r"): equiv = f"other {op_desc_op} {typ}" elif op_name == "divmod": equiv = f"{op_name}({typ}, other)" else: equiv = f"{typ} {op_desc_op} other" if typ == "series": base_doc = _flex_doc_SERIES if op_desc["reverse"]: base_doc += _see_also_reverse_SERIES.format( reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"] ) doc_no_examples = base_doc.format( desc=op_desc["desc"], op_name=op_name, equiv=equiv, series_returns=op_desc["series_returns"], ) ser_example = op_desc["series_examples"] if ser_example: doc = doc_no_examples + ser_example else: doc = doc_no_examples elif typ == "dataframe": base_doc = _flex_doc_FRAME doc = base_doc.format( desc=op_desc["desc"], op_name=op_name, equiv=equiv, reverse=op_desc["reverse"], ) else: raise AssertionError("Invalid typ argument.") return doc
Make the appropriate substitutions for the given operation and class-typ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring to attach to a generated method. Parameters ---------- op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} typ : str {'series', 'dataframe'} Returns ------- doc : str
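A hedged sketch of the DataFrame branch of `make_flex_doc` as defined above; the Series branch also relies on 'see_also_desc' entries that the surrounding module fills in elsewhere, so only the 'dataframe' case is shown:

doc = make_flex_doc("__add__", "dataframe")        # "__add__" is normalized to "add"
print("binary operator `add`" in doc)              # True
print("With reverse version, `radd`." in doc)      # True -- the reverse-method name is substituted in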
173,121
from __future__ import annotations from functools import wraps import sys from typing import Callable from pandas._libs.lib import item_from_zerodim from pandas._libs.missing import is_matching_na from pandas._typing import F from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCSeries, ) def _maybe_match_name(a, b): """ Try to find a name to attach to the result of an operation between a and b. If only one of these has a `name` attribute, return that name. Otherwise return a consensus name if they match or None if they have different names. Parameters ---------- a : object b : object Returns ------- name : str or None See Also -------- pandas.core.common.consensus_name_attr """ a_has = hasattr(a, "name") b_has = hasattr(b, "name") if a_has and b_has: try: if a.name == b.name: return a.name elif is_matching_na(a.name, b.name): # e.g. both are np.nan return a.name else: return None except TypeError: # pd.NA if is_matching_na(a.name, b.name): return a.name return None except ValueError: # e.g. np.int64(1) vs (np.int64(1), np.int64(2)) return None elif a_has: return a.name elif b_has: return b.name return None ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `get_op_result_name` function. Write a Python function `def get_op_result_name(left, right)` to solve the following problem: Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string Here is the function: def get_op_result_name(left, right): """ Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string """ if isinstance(right, (ABCSeries, ABCIndex)): name = _maybe_match_name(left, right) else: name = left.name return name
Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string
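A quick illustration of the name-resolution rules above, assuming an ordinary pandas install:

import pandas as pd

a = pd.Series([1, 2], name="x")
b = pd.Series([3, 4], name="x")
c = pd.Series([5, 6], name="y")

print(get_op_result_name(a, b))    # x     (matching names are kept)
print(get_op_result_name(a, c))    # None  (conflicting names are dropped)
print(get_op_result_name(a, 10))   # x     (right operand is not a Series/Index, keep left's name)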
173,122
from __future__ import annotations import datetime from functools import partial import operator from typing import Any import numpy as np from pandas._libs import ( NaT, Timedelta, Timestamp, lib, ops as libops, ) from pandas._libs.tslibs import BaseOffset from pandas._typing import ( ArrayLike, Shape, ) from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, find_common_type, ) from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_integer_dtype, is_list_like, is_numeric_v_string_like, is_object_dtype, is_scalar, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core.computation import expressions from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.ops import ( missing, roperator, ) from pandas.core.ops.dispatch import should_extension_dispatch from pandas.core.ops.invalid import invalid_comparison def arithmetic_op(left: ArrayLike, right: Any, op): """ Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... Note: the caller is responsible for ensuring that numpy warnings are suppressed (with np.errstate(all="ignore")) if needed. Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame or Index. Series is *not* excluded. op : {operator.add, operator.sub, ...} Or one of the reversed variants from roperator. Returns ------- ndarray or ExtensionArray Or a 2-tuple of these in the case of divmod or rdivmod. """ # NB: We assume that extract_array and ensure_wrapped_if_datetimelike # have already been called on `left` and `right`, # and `maybe_prepare_scalar_for_op` has already been called on `right` # We need to special-case datetime64/timedelta64 dtypes (e.g. because numpy # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390) if ( should_extension_dispatch(left, right) or isinstance(right, (Timedelta, BaseOffset, Timestamp)) or right is NaT ): # Timedelta/Timestamp and other custom scalars are included in the check # because numexpr will fail on it, see GH#31457 res_values = op(left, right) else: # TODO we should handle EAs consistently and move this check before the if/else # (https://github.com/pandas-dev/pandas/issues/41165) _bool_arith_check(op, left, right) # error: Argument 1 to "_na_arithmetic_op" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type] return res_values def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: """ Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`. Note: the caller is responsible for ensuring that numpy warnings are suppressed (with np.errstate(all="ignore")) if needed. Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame, Series, or Index. op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le} Returns ------- ndarray or ExtensionArray """ # NB: We assume extract_array has already been called on left and right lvalues = ensure_wrapped_if_datetimelike(left) rvalues = ensure_wrapped_if_datetimelike(right) rvalues = lib.item_from_zerodim(rvalues) if isinstance(rvalues, list): # We don't catch tuple here bc we may be comparing e.g. 
MultiIndex # to a tuple that represents a single entry, see test_compare_tuple_strs rvalues = np.asarray(rvalues) if isinstance(rvalues, (np.ndarray, ABCExtensionArray)): # TODO: make this treatment consistent across ops and classes. # We are not catching all listlikes here (e.g. frozenset, tuple) # The ambiguous case is object-dtype. See GH#27803 if len(lvalues) != len(rvalues): raise ValueError( "Lengths must match to compare", lvalues.shape, rvalues.shape ) if should_extension_dispatch(lvalues, rvalues) or ( (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) and not is_object_dtype(lvalues.dtype) ): # Call the method on lvalues res_values = op(lvalues, rvalues) elif is_scalar(rvalues) and isna(rvalues): # TODO: but not pd.NA? # numpy does not like comparisons vs None if op is operator.ne: res_values = np.ones(lvalues.shape, dtype=bool) else: res_values = np.zeros(lvalues.shape, dtype=bool) elif is_numeric_v_string_like(lvalues, rvalues): # GH#36377 going through the numexpr path would incorrectly raise return invalid_comparison(lvalues, rvalues, op) elif is_object_dtype(lvalues.dtype) or isinstance(rvalues, str): res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) else: res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) return res_values def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: """ Evaluate a logical operation `|`, `&`, or `^`. Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame, Series, or Index. op : {operator.and_, operator.or_, operator.xor} Or one of the reversed variants from roperator. Returns ------- ndarray or ExtensionArray """ fill_int = lambda x: x def fill_bool(x, left=None): # if `left` is specifically not-boolean, we do not cast to bool if x.dtype.kind in ["c", "f", "O"]: # dtypes that can hold NA mask = isna(x) if mask.any(): x = x.astype(object) x[mask] = False if left is None or is_bool_dtype(left.dtype): x = x.astype(bool) return x is_self_int_dtype = is_integer_dtype(left.dtype) right = lib.item_from_zerodim(right) if is_list_like(right) and not hasattr(right, "dtype"): # e.g. list, tuple right = construct_1d_object_array_from_listlike(right) # NB: We assume extract_array has already been called on left and right lvalues = ensure_wrapped_if_datetimelike(left) rvalues = right if should_extension_dispatch(lvalues, rvalues): # Call the method on lvalues res_values = op(lvalues, rvalues) else: if isinstance(rvalues, np.ndarray): is_other_int_dtype = is_integer_dtype(rvalues.dtype) rvalues = rvalues if is_other_int_dtype else fill_bool(rvalues, lvalues) else: # i.e. scalar is_other_int_dtype = lib.is_integer(rvalues) # For int vs int `^`, `|`, `&` are bitwise operators and return # integer dtypes. Otherwise these are boolean ops filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool res_values = na_logical_op(lvalues, rvalues, op) # error: Cannot call function of unknown type res_values = filler(res_values) # type: ignore[operator] return res_values class partial(Generic[_T]): func: Callable[..., _T] args: Tuple[Any, ...] keywords: Dict[str, Any] def __init__(self, func: Callable[..., _T], *args: Any, **kwargs: Any) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> _T: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... The provided code snippet includes necessary dependencies for implementing the `get_array_op` function. 
Write a Python function `def get_array_op(op)` to solve the following problem: Return a binary array operation corresponding to the given operator op. Parameters ---------- op : function Binary operator from operator or roperator module. Returns ------- functools.partial Here is the function: def get_array_op(op): """ Return a binary array operation corresponding to the given operator op. Parameters ---------- op : function Binary operator from operator or roperator module. Returns ------- functools.partial """ if isinstance(op, partial): # We get here via dispatch_to_series in DataFrame case # e.g. test_rolling_consistency_var_debiasing_factors return op op_name = op.__name__.strip("_").lstrip("r") if op_name == "arith_op": # Reached via DataFrame._combine_frame i.e. flex methods # e.g. test_df_add_flex_filled_mixed_dtypes return op if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}: return partial(comparison_op, op=op) elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}: return partial(logical_op, op=op) elif op_name in { "add", "sub", "mul", "truediv", "floordiv", "mod", "divmod", "pow", }: return partial(arithmetic_op, op=op) else: raise NotImplementedError(op_name)
Return a binary array operation corresponding to the given operator op. Parameters ---------- op : function Binary operator from operator or roperator module. Returns ------- functools.partial
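A small sketch of the dispatch above applied to plain NumPy arrays; these are private helpers, so behaviour may shift between pandas versions:

import operator

import numpy as np

add_op = get_array_op(operator.add)   # functools.partial(arithmetic_op, op=operator.add)
gt_op = get_array_op(operator.gt)     # functools.partial(comparison_op, op=operator.gt)

print(add_op(np.array([1, 2, 3]), np.array([10, 20, 30])))   # [11 22 33]
print(gt_op(np.array([1, 2, 3]), 2))                         # [False False  True]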
173,123
from __future__ import annotations import datetime from functools import partial import operator from typing import Any import numpy as np from pandas._libs import ( NaT, Timedelta, Timestamp, lib, ops as libops, ) from pandas._libs.tslibs import BaseOffset from pandas._typing import ( ArrayLike, Shape, ) from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, find_common_type, ) from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_integer_dtype, is_list_like, is_numeric_v_string_like, is_object_dtype, is_scalar, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core.computation import expressions from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.ops import ( missing, roperator, ) from pandas.core.ops.dispatch import should_extension_dispatch from pandas.core.ops.invalid import invalid_comparison Shape = Tuple[int, ...] def isna(obj: Scalar) -> bool: ... def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) The provided code snippet includes necessary dependencies for implementing the `maybe_prepare_scalar_for_op` function. Write a Python function `def maybe_prepare_scalar_for_op(obj, shape: Shape)` to solve the following problem: Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. 
Parameters ---------- obj: object shape : tuple[int] Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. Here is the function: def maybe_prepare_scalar_for_op(obj, shape: Shape): """ Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object shape : tuple[int] Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. """ if type(obj) is datetime.timedelta: # GH#22390 cast up to Timedelta to rely on Timedelta # implementation; otherwise operation against numeric-dtype # raises TypeError return Timedelta(obj) elif type(obj) is datetime.datetime: # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above return Timestamp(obj) elif isinstance(obj, np.datetime64): # GH#28080 numpy casts integer-dtype to datetime64 when doing # array[int] + datetime64, which we do not allow if isna(obj): from pandas.core.arrays import DatetimeArray # Avoid possible ambiguities with pd.NaT obj = obj.astype("datetime64[ns]") right = np.broadcast_to(obj, shape) return DatetimeArray(right) return Timestamp(obj) elif isinstance(obj, np.timedelta64): if isna(obj): from pandas.core.arrays import TimedeltaArray # wrapping timedelta64("NaT") in Timedelta returns NaT, # which would incorrectly be treated as a datetime-NaT, so # we broadcast and wrap in a TimedeltaArray obj = obj.astype("timedelta64[ns]") right = np.broadcast_to(obj, shape) return TimedeltaArray(right) # In particular non-nanosecond timedelta64 needs to be cast to # nanoseconds, or else we get undesired behavior like # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') return Timedelta(obj) return obj
Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object shape : tuple[int] Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation.
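A brief sketch of the scalar coercion described above (private helper; exact return types may differ slightly across versions):

import datetime

import numpy as np

print(maybe_prepare_scalar_for_op(datetime.timedelta(days=1), (3,)))   # Timedelta, e.g. 1 days 00:00:00
print(maybe_prepare_scalar_for_op(np.timedelta64(2, "D"), (3,)))       # Timedelta of 2 days
print(maybe_prepare_scalar_for_op(np.timedelta64("NaT"), (3,)))        # length-3 TimedeltaArray of NaT
print(maybe_prepare_scalar_for_op(5, (3,)))                            # 5 -- other scalars pass through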
173,124
from __future__ import annotations import operator from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.ops import roperator def _get_method_wrappers(cls): """ Find the appropriate operation-wrappers to use when defining flex/special arithmetic, boolean, and comparison operations with the given class. Parameters ---------- cls : class Returns ------- arith_flex : function or None comp_flex : function or None """ # TODO: make these non-runtime imports once the relevant functions # are no longer in __init__ from pandas.core.ops import ( flex_arith_method_FRAME, flex_comp_method_FRAME, flex_method_SERIES, ) if issubclass(cls, ABCSeries): # Just Series arith_flex = flex_method_SERIES comp_flex = flex_method_SERIES elif issubclass(cls, ABCDataFrame): arith_flex = flex_arith_method_FRAME comp_flex = flex_comp_method_FRAME return arith_flex, comp_flex def _create_methods(cls, arith_method, comp_method): # creates actual flex methods based upon arithmetic, and comp method # constructors. have_divmod = issubclass(cls, ABCSeries) # divmod is available for Series new_methods = {} new_methods.update( { "add": arith_method(operator.add), "radd": arith_method(roperator.radd), "sub": arith_method(operator.sub), "mul": arith_method(operator.mul), "truediv": arith_method(operator.truediv), "floordiv": arith_method(operator.floordiv), "mod": arith_method(operator.mod), "pow": arith_method(operator.pow), "rmul": arith_method(roperator.rmul), "rsub": arith_method(roperator.rsub), "rtruediv": arith_method(roperator.rtruediv), "rfloordiv": arith_method(roperator.rfloordiv), "rpow": arith_method(roperator.rpow), "rmod": arith_method(roperator.rmod), } ) new_methods["div"] = new_methods["truediv"] new_methods["rdiv"] = new_methods["rtruediv"] if have_divmod: # divmod doesn't have an op that is supported by numexpr new_methods["divmod"] = arith_method(divmod) new_methods["rdivmod"] = arith_method(roperator.rdivmod) new_methods.update( { "eq": comp_method(operator.eq), "ne": comp_method(operator.ne), "lt": comp_method(operator.lt), "gt": comp_method(operator.gt), "le": comp_method(operator.le), "ge": comp_method(operator.ge), } ) new_methods = {k.strip("_"): v for k, v in new_methods.items()} return new_methods def _add_methods(cls, new_methods) -> None: for name, method in new_methods.items(): setattr(cls, name, method) The provided code snippet includes necessary dependencies for implementing the `add_flex_arithmetic_methods` function. Write a Python function `def add_flex_arithmetic_methods(cls) -> None` to solve the following problem: Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. Parameters ---------- cls : class flex methods will be defined and pinned to this class Here is the function: def add_flex_arithmetic_methods(cls) -> None: """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. Parameters ---------- cls : class flex methods will be defined and pinned to this class """ flex_arith_method, flex_comp_method = _get_method_wrappers(cls) new_methods = _create_methods(cls, flex_arith_method, flex_comp_method) new_methods.update( { "multiply": new_methods["mul"], "subtract": new_methods["sub"], "divide": new_methods["div"], } ) # opt out of bool flex methods for now assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_")) _add_methods(cls, new_methods=new_methods)
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. Parameters ---------- cls : class flex methods will be defined and pinned to this class
173,125
from __future__ import annotations import numpy as np from pandas._libs import ( lib, missing as libmissing, ) def raise_for_nan(value, method: str) -> None: if lib.is_float(value) and np.isnan(value): raise ValueError(f"Cannot perform logical '{method}' with floating NaN") The provided code snippet includes necessary dependencies for implementing the `kleene_or` function. Write a Python function `def kleene_or( left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None, )` to solve the following problem: Boolean ``or`` using Kleene logic. Values are NA where we have ``NA | NA`` or ``NA | False``. ``NA | True`` is considered True. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical or, and the new mask. Here is the function: def kleene_or( left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None, ): """ Boolean ``or`` using Kleene logic. Values are NA where we have ``NA | NA`` or ``NA | False``. ``NA | True`` is considered True. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical or, and the new mask. """ # To reduce the number of cases, we ensure that `left` & `left_mask` # always come from an array, not a scalar. This is safe, since # A | B == B | A if left_mask is None: return kleene_or(right, left, right_mask, left_mask) if not isinstance(left, np.ndarray): raise TypeError("Either `left` or `right` need to be a np.ndarray.") raise_for_nan(right, method="or") if right is libmissing.NA: result = left.copy() else: result = left | right if right_mask is not None: # output is unknown where (False & NA), (NA & False), (NA & NA) left_false = ~(left | left_mask) right_false = ~(right | right_mask) mask = ( (left_false & right_mask) | (right_false & left_mask) | (left_mask & right_mask) ) else: if right is True: mask = np.zeros_like(left_mask) elif right is libmissing.NA: mask = (~left & ~left_mask) | left_mask else: # False mask = left_mask.copy() return result, mask
Boolean ``or`` using Kleene logic. Values are NA where we have ``NA | NA`` or ``NA | False``. ``NA | True`` is considered True. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical or, and the new mask.
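A worked example of the masked Kleene "or" above: True | NA stays True, while False | NA and NA | NA are masked as NA (a sketch against the definition shown here):

import numpy as np
from pandas._libs import missing as libmissing

left = np.array([True, False, False])
left_mask = np.array([False, False, True])    # third value represents NA

result, mask = kleene_or(left, libmissing.NA, left_mask, None)
print(result)   # [ True False False]
print(mask)     # [False  True  True] -> True|NA=True, False|NA=NA, NA|NA=NA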
173,126
from __future__ import annotations import numpy as np from pandas._libs import ( lib, missing as libmissing, ) def raise_for_nan(value, method: str) -> None: if lib.is_float(value) and np.isnan(value): raise ValueError(f"Cannot perform logical '{method}' with floating NaN") The provided code snippet includes necessary dependencies for implementing the `kleene_xor` function. Write a Python function `def kleene_xor( left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None, )` to solve the following problem: Boolean ``xor`` using Kleene logic. This is the same as ``or``, with the following adjustments * True, True -> False * True, NA -> NA Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical xor, and the new mask. Here is the function: def kleene_xor( left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None, ): """ Boolean ``xor`` using Kleene logic. This is the same as ``or``, with the following adjustments * True, True -> False * True, NA -> NA Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical xor, and the new mask. """ # To reduce the number of cases, we ensure that `left` & `left_mask` # always come from an array, not a scalar. This is safe, since # A ^ B == B ^ A if left_mask is None: return kleene_xor(right, left, right_mask, left_mask) if not isinstance(left, np.ndarray): raise TypeError("Either `left` or `right` need to be a np.ndarray.") raise_for_nan(right, method="xor") if right is libmissing.NA: result = np.zeros_like(left) else: result = left ^ right if right_mask is None: if right is libmissing.NA: mask = np.ones_like(left_mask) else: mask = left_mask.copy() else: mask = left_mask | right_mask return result, mask
Boolean ``xor`` using Kleene logic. This is the same as ``or``, with the following adjustments * True, True -> False * True, NA -> NA Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical xor, and the new mask.
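The corresponding sketch for Kleene "xor": any NA operand makes the result NA, otherwise it is a plain exclusive or:

import numpy as np

left = np.array([True, False, True])
left_mask = np.array([False, False, True])    # third value represents NA
right = np.array([True, True, False])
right_mask = np.array([False, False, False])

result, mask = kleene_xor(left, right, left_mask, right_mask)
print(result)   # [False  True  True]
print(mask)     # [False False  True] -> True^True=False, False^True=True, NA^False=NA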
173,127
from __future__ import annotations import numpy as np from pandas._libs import ( lib, missing as libmissing, ) def raise_for_nan(value, method: str) -> None: if lib.is_float(value) and np.isnan(value): raise ValueError(f"Cannot perform logical '{method}' with floating NaN") The provided code snippet includes necessary dependencies for implementing the `kleene_and` function. Write a Python function `def kleene_and( left: bool | libmissing.NAType | np.ndarray, right: bool | libmissing.NAType | np.ndarray, left_mask: np.ndarray | None, right_mask: np.ndarray | None, )` to solve the following problem: Boolean ``and`` using Kleene logic. Values are ``NA`` for ``NA & NA`` or ``True & NA``. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical xor, and the new mask. Here is the function: def kleene_and( left: bool | libmissing.NAType | np.ndarray, right: bool | libmissing.NAType | np.ndarray, left_mask: np.ndarray | None, right_mask: np.ndarray | None, ): """ Boolean ``and`` using Kleene logic. Values are ``NA`` for ``NA & NA`` or ``True & NA``. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical xor, and the new mask. """ # To reduce the number of cases, we ensure that `left` & `left_mask` # always come from an array, not a scalar. This is safe, since # A & B == B & A if left_mask is None: return kleene_and(right, left, right_mask, left_mask) if not isinstance(left, np.ndarray): raise TypeError("Either `left` or `right` need to be a np.ndarray.") raise_for_nan(right, method="and") if right is libmissing.NA: result = np.zeros_like(left) else: result = left & right if right_mask is None: # Scalar `right` if right is libmissing.NA: mask = (left & ~left_mask) | left_mask else: mask = left_mask.copy() if right is False: # unmask everything mask[:] = False else: # unmask where either left or right is False left_false = ~(left | left_mask) right_false = ~(right | right_mask) mask = (left_mask & ~right_false) | (right_mask & ~left_false) return result, mask
Boolean ``and`` using Kleene logic. Values are ``NA`` for ``NA & NA`` or ``True & NA``. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical and, and the new mask.
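And a matching sketch for Kleene "and": False short-circuits NA (NA & False is False), while NA & True stays NA:

import numpy as np

left = np.array([True, False, True])
left_mask = np.array([False, False, True])    # third value represents NA

result, mask = kleene_and(left, False, left_mask, None)
print(mask)     # [False False False] -> even NA & False is definitively False

result, mask = kleene_and(left, True, left_mask, None)
print(mask)     # [False False  True] -> NA & True remains NA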
173,128
from __future__ import annotations from typing import ( Any, Callable, Literal, ) import numpy as np from pandas._libs import ( Timedelta, Timestamp, ) from pandas._libs.lib import infer_dtype from pandas._typing import IntervalLeftRight from pandas.core.dtypes.common import ( DT64NS_DTYPE, ensure_platform_int, is_bool_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_extension_array_dtype, is_integer, is_list_like, is_numeric_dtype, is_scalar, is_timedelta64_dtype, ) from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna from pandas import ( Categorical, Index, IntervalIndex, to_datetime, to_timedelta, ) from pandas.core import nanops import pandas.core.algorithms as algos def _bins_to_cuts( x, bins: np.ndarray, right: bool = True, labels=None, precision: int = 3, include_lowest: bool = False, dtype=None, duplicates: str = "raise", ordered: bool = True, ): if not ordered and labels is None: raise ValueError("'labels' must be provided if 'ordered = False'") if duplicates not in ["raise", "drop"]: raise ValueError( "invalid value for 'duplicates' parameter, valid options are: raise, drop" ) if isinstance(bins, IntervalIndex): # we have a fast-path here ids = bins.get_indexer(x) result = Categorical.from_codes(ids, categories=bins, ordered=True) return result, bins unique_bins = algos.unique(bins) if len(unique_bins) < len(bins) and len(bins) != 2: if duplicates == "raise": raise ValueError( f"Bin edges must be unique: {repr(bins)}.\n" f"You can drop duplicate edges by setting the 'duplicates' kwarg" ) bins = unique_bins side: Literal["left", "right"] = "left" if right else "right" ids = ensure_platform_int(bins.searchsorted(x, side=side)) if include_lowest: ids[np.asarray(x) == bins[0]] = 1 na_mask = isna(x) | (ids == len(bins)) | (ids == 0) has_nas = na_mask.any() if labels is not False: if not (labels is None or is_list_like(labels)): raise ValueError( "Bin labels must either be False, None or passed in as a " "list-like argument" ) if labels is None: labels = _format_labels( bins, precision, right=right, include_lowest=include_lowest, dtype=dtype ) elif ordered and len(set(labels)) != len(labels): raise ValueError( "labels must be unique if ordered=True; pass ordered=False " "for duplicate labels" ) else: if len(labels) != len(bins) - 1: raise ValueError( "Bin labels must be one fewer than the number of bin edges" ) if not is_categorical_dtype(labels): labels = Categorical( labels, categories=labels if len(set(labels)) == len(labels) else None, ordered=ordered, ) # TODO: handle mismatch between categorical label order and pandas.cut order. np.putmask(ids, na_mask, 0) result = algos.take_nd(labels, ids - 1) else: result = ids - 1 if has_nas: result = result.astype(np.float64) np.putmask(result, na_mask, np.nan) return result, bins def _coerce_to_type(x): """ if the passed data is of datetime/timedelta, bool or nullable int type, this method converts it to numeric so that cut or qcut method can handle it """ dtype = None if is_datetime64tz_dtype(x.dtype): dtype = x.dtype elif is_datetime64_dtype(x.dtype): x = to_datetime(x).astype("datetime64[ns]", copy=False) dtype = np.dtype("datetime64[ns]") elif is_timedelta64_dtype(x.dtype): x = to_timedelta(x) dtype = np.dtype("timedelta64[ns]") elif is_bool_dtype(x.dtype): # GH 20303 x = x.astype(np.int64) # To support cut and qcut for IntegerArray we convert to float dtype. # Will properly support in the future. 
# https://github.com/pandas-dev/pandas/pull/31290 # https://github.com/pandas-dev/pandas/issues/31389 elif is_extension_array_dtype(x.dtype) and is_numeric_dtype(x.dtype): x = x.to_numpy(dtype=np.float64, na_value=np.nan) if dtype is not None: # GH 19768: force NaT to NaN during integer conversion x = np.where(x.notna(), x.view(np.int64), np.nan) return x, dtype def _preprocess_for_cut(x): """ handles preprocessing for cut where we convert passed input to array, strip the index information and store it separately """ # Check that the passed array is a Pandas or Numpy object # We don't want to strip away a Pandas data-type here (e.g. datetimetz) ndim = getattr(x, "ndim", None) if ndim is None: x = np.asarray(x) if x.ndim != 1: raise ValueError("Input array must be 1 dimensional") return x def _postprocess_for_cut(fac, bins, retbins: bool, dtype, original): """ handles post processing for the cut method where we combine the index information if the originally passed datatype was a series """ if isinstance(original, ABCSeries): fac = original._constructor(fac, index=original.index, name=original.name) if not retbins: return fac bins = _convert_bin_to_datelike_type(bins, dtype) return fac, bins The provided code snippet includes necessary dependencies for implementing the `qcut` function. Write a Python function `def qcut( x, q, labels=None, retbins: bool = False, precision: int = 3, duplicates: str = "raise", )` to solve the following problem: Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : int or list-like of float Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles. labels : array or False, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. If True, raises an error. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3]) Here is the function: def qcut( x, q, labels=None, retbins: bool = False, precision: int = 3, duplicates: str = "raise", ): """ Quantile-based discretization function. 
Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : int or list-like of float Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles. labels : array or False, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. If True, raises an error. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3]) """ original = x x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q x_np = np.asarray(x) x_np = x_np[~np.isnan(x_np)] bins = np.quantile(x_np, quantiles) fac, bins = _bins_to_cuts( x, bins, labels=labels, precision=precision, include_lowest=True, dtype=dtype, duplicates=duplicates, ) return _postprocess_for_cut(fac, bins, retbins, dtype, original)
Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : int or list-like of float Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles. labels : array or False, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. If True, raises an error. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3])
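Because qcut is public pandas API, a direct usage sketch (behaviour per the docstring above):

import pandas as pd

print(pd.qcut(range(5), 4, labels=False))              # [0 0 1 2 3] -- integer bin indicators
print(pd.qcut(range(10), [0, 0.5, 1.0]))               # two equal-sized buckets from explicit quantiles
print(pd.qcut([1, 1, 1, 1, 5], 4, duplicates="drop"))  # duplicate edges are dropped instead of raising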
173,129
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible class _MergeOperation: """ Perform a database (SQL) merge operation between two DataFrame or Series objects using either columns as keys or their row indexes """ _merge_type = "merge" how: MergeHow | Literal["asof"] on: IndexLabel | None # left_on/right_on may be None when passed, but in validate_specification # get replaced with non-None. 
left_on: Sequence[Hashable | AnyArrayLike] right_on: Sequence[Hashable | AnyArrayLike] left_index: bool right_index: bool axis: AxisInt bm_axis: AxisInt sort: bool suffixes: Suffixes copy: bool indicator: str | bool validate: str | None join_names: list[Hashable] right_join_keys: list[AnyArrayLike] left_join_keys: list[AnyArrayLike] def __init__( self, left: DataFrame | Series, right: DataFrame | Series, how: MergeHow | Literal["asof"] = "inner", on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, axis: AxisInt = 1, left_index: bool = False, right_index: bool = False, sort: bool = True, suffixes: Suffixes = ("_x", "_y"), indicator: str | bool = False, validate: str | None = None, ) -> None: _left = _validate_operand(left) _right = _validate_operand(right) self.left = self.orig_left = _left self.right = self.orig_right = _right self.how = how # bm_axis -> the axis on the BlockManager self.bm_axis = axis # axis --> the axis on the Series/DataFrame self.axis = 1 - axis if self.left.ndim == 2 else 0 self.on = com.maybe_make_list(on) self.suffixes = suffixes self.sort = sort self.left_index = left_index self.right_index = right_index self.indicator = indicator if not is_bool(left_index): raise ValueError( f"left_index parameter must be of type bool, not {type(left_index)}" ) if not is_bool(right_index): raise ValueError( f"right_index parameter must be of type bool, not {type(right_index)}" ) # GH 40993: raise when merging between different levels; enforced in 2.0 if _left.columns.nlevels != _right.columns.nlevels: msg = ( "Not allowed to merge between different levels. " f"({_left.columns.nlevels} levels on the left, " f"{_right.columns.nlevels} on the right)" ) raise MergeError(msg) self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on) cross_col = None if self.how == "cross": ( self.left, self.right, self.how, cross_col, ) = self._create_cross_configuration(self.left, self.right) self.left_on = self.right_on = [cross_col] self._cross = cross_col # note this function has side effects ( self.left_join_keys, self.right_join_keys, self.join_names, ) = self._get_merge_keys() # validate the merge keys dtypes. We may need to coerce # to avoid incompatible dtypes self._maybe_coerce_merge_keys() # If argument passed to validate, # check if columns specified as unique # are in fact unique. if validate is not None: self._validate(validate) def _reindex_and_concat( self, join_index: Index, left_indexer: npt.NDArray[np.intp] | None, right_indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> DataFrame: """ reindex along index and concat along columns. """ # Take views so we do not alter the originals left = self.left[:] right = self.right[:] llabels, rlabels = _items_overlap_with_suffix( self.left._info_axis, self.right._info_axis, self.suffixes ) if left_indexer is not None and not is_range_indexer(left_indexer, len(left)): # Pinning the index here (and in the right code just below) is not # necessary, but makes the `.take` more performant if we have e.g. # a MultiIndex for left.index. 
lmgr = left._mgr.reindex_indexer( join_index, left_indexer, axis=1, copy=False, only_slice=True, allow_dups=True, use_na_proxy=True, ) left = left._constructor(lmgr) left.index = join_index if right_indexer is not None and not is_range_indexer( right_indexer, len(right) ): rmgr = right._mgr.reindex_indexer( join_index, right_indexer, axis=1, copy=False, only_slice=True, allow_dups=True, use_na_proxy=True, ) right = right._constructor(rmgr) right.index = join_index from pandas import concat left.columns = llabels right.columns = rlabels result = concat([left, right], axis=1, copy=copy) return result def get_result(self, copy: bool | None = True) -> DataFrame: if self.indicator: self.left, self.right = self._indicator_pre_merge(self.left, self.right) join_index, left_indexer, right_indexer = self._get_join_info() result = self._reindex_and_concat( join_index, left_indexer, right_indexer, copy=copy ) result = result.__finalize__(self, method=self._merge_type) if self.indicator: result = self._indicator_post_merge(result) self._maybe_add_join_keys(result, left_indexer, right_indexer) self._maybe_restore_index_levels(result) self._maybe_drop_cross_column(result, self._cross) return result.__finalize__(self, method="merge") def _maybe_drop_cross_column( self, result: DataFrame, cross_col: str | None ) -> None: if cross_col is not None: del result[cross_col] def _indicator_name(self) -> str | None: if isinstance(self.indicator, str): return self.indicator elif isinstance(self.indicator, bool): return "_merge" if self.indicator else None else: raise ValueError( "indicator option can only accept boolean or string arguments" ) def _indicator_pre_merge( self, left: DataFrame, right: DataFrame ) -> tuple[DataFrame, DataFrame]: columns = left.columns.union(right.columns) for i in ["_left_indicator", "_right_indicator"]: if i in columns: raise ValueError( "Cannot use `indicator=True` option when " f"data contains a column named {i}" ) if self._indicator_name in columns: raise ValueError( "Cannot use name of an existing column for indicator column" ) left = left.copy() right = right.copy() left["_left_indicator"] = 1 left["_left_indicator"] = left["_left_indicator"].astype("int8") right["_right_indicator"] = 2 right["_right_indicator"] = right["_right_indicator"].astype("int8") return left, right def _indicator_post_merge(self, result: DataFrame) -> DataFrame: result["_left_indicator"] = result["_left_indicator"].fillna(0) result["_right_indicator"] = result["_right_indicator"].fillna(0) result[self._indicator_name] = Categorical( (result["_left_indicator"] + result["_right_indicator"]), categories=[1, 2, 3], ) result[self._indicator_name] = result[ self._indicator_name ].cat.rename_categories(["left_only", "right_only", "both"]) result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1) return result def _maybe_restore_index_levels(self, result: DataFrame) -> None: """ Restore index levels specified as `on` parameters Here we check for cases where `self.left_on` and `self.right_on` pairs each reference an index level in their respective DataFrames. The joined columns corresponding to these pairs are then restored to the index of `result`. **Note:** This method has side effects. 
It modifies `result` in-place Parameters ---------- result: DataFrame merge result Returns ------- None """ names_to_restore = [] for name, left_key, right_key in zip( self.join_names, self.left_on, self.right_on ): if ( # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible # type "Union[Hashable, ExtensionArray, Index, Series]"; expected # "Hashable" self.orig_left._is_level_reference(left_key) # type: ignore[arg-type] # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible # type "Union[Hashable, ExtensionArray, Index, Series]"; expected # "Hashable" and self.orig_right._is_level_reference( right_key # type: ignore[arg-type] ) and left_key == right_key and name not in result.index.names ): names_to_restore.append(name) if names_to_restore: result.set_index(names_to_restore, inplace=True) def _maybe_add_join_keys( self, result: DataFrame, left_indexer: np.ndarray | None, right_indexer: np.ndarray | None, ) -> None: left_has_missing = None right_has_missing = None assert all(is_array_like(x) for x in self.left_join_keys) keys = zip(self.join_names, self.left_on, self.right_on) for i, (name, lname, rname) in enumerate(keys): if not _should_fill(lname, rname): continue take_left, take_right = None, None if name in result: if left_indexer is not None and right_indexer is not None: if name in self.left: if left_has_missing is None: left_has_missing = (left_indexer == -1).any() if left_has_missing: take_right = self.right_join_keys[i] if not is_dtype_equal( result[name].dtype, self.left[name].dtype ): take_left = self.left[name]._values elif name in self.right: if right_has_missing is None: right_has_missing = (right_indexer == -1).any() if right_has_missing: take_left = self.left_join_keys[i] if not is_dtype_equal( result[name].dtype, self.right[name].dtype ): take_right = self.right[name]._values elif left_indexer is not None: take_left = self.left_join_keys[i] take_right = self.right_join_keys[i] if take_left is not None or take_right is not None: if take_left is None: lvals = result[name]._values else: # TODO: can we pin down take_left's type earlier? take_left = extract_array(take_left, extract_numpy=True) lfill = na_value_for_dtype(take_left.dtype) lvals = algos.take_nd(take_left, left_indexer, fill_value=lfill) if take_right is None: rvals = result[name]._values else: # TODO: can we pin down take_right's type earlier? 
taker = extract_array(take_right, extract_numpy=True) rfill = na_value_for_dtype(taker.dtype) rvals = algos.take_nd(taker, right_indexer, fill_value=rfill) # if we have an all missing left_indexer # make sure to just use the right values or vice-versa mask_left = left_indexer == -1 # error: Item "bool" of "Union[Any, bool]" has no attribute "all" if mask_left.all(): # type: ignore[union-attr] key_col = Index(rvals) result_dtype = rvals.dtype elif right_indexer is not None and (right_indexer == -1).all(): key_col = Index(lvals) result_dtype = lvals.dtype else: key_col = Index(lvals).where(~mask_left, rvals) result_dtype = find_common_type([lvals.dtype, rvals.dtype]) if result._is_label_reference(name): result[name] = Series( key_col, dtype=result_dtype, index=result.index ) elif result._is_level_reference(name): if isinstance(result.index, MultiIndex): key_col.name = name idx_list = [ result.index.get_level_values(level_name) if level_name != name else key_col for level_name in result.index.names ] result.set_index(idx_list, inplace=True) else: result.index = Index(key_col, name=name) else: result.insert(i, name or f"key_{i}", key_col) def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """return the join indexers""" return get_join_indexers( self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how ) def _get_join_info( self, ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # make mypy happy assert self.how != "cross" left_ax = self.left.axes[self.axis] right_ax = self.right.axes[self.axis] if self.left_index and self.right_index and self.how != "asof": join_index, left_indexer, right_indexer = left_ax.join( right_ax, how=self.how, return_indexers=True, sort=self.sort ) elif self.right_index and self.how == "left": join_index, left_indexer, right_indexer = _left_join_on_index( left_ax, right_ax, self.left_join_keys, sort=self.sort ) elif self.left_index and self.how == "right": join_index, right_indexer, left_indexer = _left_join_on_index( right_ax, left_ax, self.right_join_keys, sort=self.sort ) else: (left_indexer, right_indexer) = self._get_join_indexers() if self.right_index: if len(self.left) > 0: join_index = self._create_join_index( self.left.index, self.right.index, left_indexer, how="right", ) else: join_index = self.right.index.take(right_indexer) elif self.left_index: if self.how == "asof": # GH#33463 asof should always behave like a left merge join_index = self._create_join_index( self.left.index, self.right.index, left_indexer, how="left", ) elif len(self.right) > 0: join_index = self._create_join_index( self.right.index, self.left.index, right_indexer, how="left", ) else: join_index = self.left.index.take(left_indexer) else: join_index = default_index(len(left_indexer)) if len(join_index) == 0 and not isinstance(join_index, MultiIndex): join_index = default_index(0).set_names(join_index.name) return join_index, left_indexer, right_indexer def _create_join_index( self, index: Index, other_index: Index, indexer: npt.NDArray[np.intp], how: JoinHow = "left", ) -> Index: """ Create a join index by rearranging one index to match another Parameters ---------- index : Index being rearranged other_index : Index used to supply values not found in index indexer : np.ndarray[np.intp] how to rearrange index how : str Replacement is only necessary if indexer based on other_index. 
Returns ------- Index """ if self.how in (how, "outer") and not isinstance(other_index, MultiIndex): # if final index requires values in other_index but not target # index, indexer may hold missing (-1) values, causing Index.take # to take the final value in target index. So, we set the last # element to be the desired fill value. We do not use allow_fill # and fill_value because it throws a ValueError on integer indices mask = indexer == -1 if np.any(mask): fill_value = na_value_for_dtype(index.dtype, compat=False) index = index.append(Index([fill_value])) return index.take(indexer) def _get_merge_keys( self, ) -> tuple[list[AnyArrayLike], list[AnyArrayLike], list[Hashable]]: """ Note: has side effects (copy/delete key columns) Parameters ---------- left right on Returns ------- left_keys, right_keys, join_names """ # left_keys, right_keys entries can actually be anything listlike # with a 'dtype' attr left_keys: list[AnyArrayLike] = [] right_keys: list[AnyArrayLike] = [] join_names: list[Hashable] = [] right_drop: list[Hashable] = [] left_drop: list[Hashable] = [] left, right = self.left, self.right is_lkey = lambda x: is_array_like(x) and len(x) == len(left) is_rkey = lambda x: is_array_like(x) and len(x) == len(right) # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A # user could, for example, request 'left_index' and 'left_by'. In a # regular pd.merge(), users cannot specify both 'left_index' and # 'left_on'. (Instead, users have a MultiIndex). That means the # self.left_on in this function is always empty in a pd.merge(), but # a pd.merge_asof(left_index=True, left_by=...) will result in a # self.left_on array with a None in the middle of it. This requires # a work-around as designated in the code below. # See _validate_left_right_on() for where this happens. # ugh, spaghetti re #733 if _any(self.left_on) and _any(self.right_on): for lk, rk in zip(self.left_on, self.right_on): if is_lkey(lk): lk = cast(AnyArrayLike, lk) left_keys.append(lk) if is_rkey(rk): rk = cast(AnyArrayLike, rk) right_keys.append(rk) join_names.append(None) # what to do? else: # Then we're either Hashable or a wrong-length arraylike, # the latter of which will raise rk = cast(Hashable, rk) if rk is not None: right_keys.append(right._get_label_or_level_values(rk)) join_names.append(rk) else: # work-around for merge_asof(right_index=True) right_keys.append(right.index) join_names.append(right.index.name) else: if not is_rkey(rk): # Then we're either Hashable or a wrong-length arraylike, # the latter of which will raise rk = cast(Hashable, rk) if rk is not None: right_keys.append(right._get_label_or_level_values(rk)) else: # work-around for merge_asof(right_index=True) right_keys.append(right.index) if lk is not None and lk == rk: # FIXME: what about other NAs? 
# avoid key upcast in corner case (length-0) lk = cast(Hashable, lk) if len(left) > 0: right_drop.append(rk) else: left_drop.append(lk) else: rk = cast(AnyArrayLike, rk) right_keys.append(rk) if lk is not None: # Then we're either Hashable or a wrong-length arraylike, # the latter of which will raise lk = cast(Hashable, lk) left_keys.append(left._get_label_or_level_values(lk)) join_names.append(lk) else: # work-around for merge_asof(left_index=True) left_keys.append(left.index) join_names.append(left.index.name) elif _any(self.left_on): for k in self.left_on: if is_lkey(k): k = cast(AnyArrayLike, k) left_keys.append(k) join_names.append(None) else: # Then we're either Hashable or a wrong-length arraylike, # the latter of which will raise k = cast(Hashable, k) left_keys.append(left._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.right.index, MultiIndex): right_keys = [ lev._values.take(lev_codes) for lev, lev_codes in zip( self.right.index.levels, self.right.index.codes ) ] else: right_keys = [self.right.index._values] elif _any(self.right_on): for k in self.right_on: if is_rkey(k): k = cast(AnyArrayLike, k) right_keys.append(k) join_names.append(None) else: # Then we're either Hashable or a wrong-length arraylike, # the latter of which will raise k = cast(Hashable, k) right_keys.append(right._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.left.index, MultiIndex): left_keys = [ lev._values.take(lev_codes) for lev, lev_codes in zip( self.left.index.levels, self.left.index.codes ) ] else: left_keys = [self.left.index._values] if left_drop: self.left = self.left._drop_labels_or_levels(left_drop) if right_drop: self.right = self.right._drop_labels_or_levels(right_drop) return left_keys, right_keys, join_names def _maybe_coerce_merge_keys(self) -> None: # we have valid merges but we may have to further # coerce these if they are originally incompatible types # # for example if these are categorical, but are not dtype_equal # or if we have object and integer dtypes for lk, rk, name in zip( self.left_join_keys, self.right_join_keys, self.join_names ): if (len(lk) and not len(rk)) or (not len(lk) and len(rk)): continue lk = extract_array(lk, extract_numpy=True) rk = extract_array(rk, extract_numpy=True) lk_is_cat = is_categorical_dtype(lk.dtype) rk_is_cat = is_categorical_dtype(rk.dtype) lk_is_object = is_object_dtype(lk.dtype) rk_is_object = is_object_dtype(rk.dtype) # if either left or right is a categorical # then the must match exactly in categories & ordered if lk_is_cat and rk_is_cat: lk = cast(Categorical, lk) rk = cast(Categorical, rk) if lk._categories_match_up_to_permutation(rk): continue elif lk_is_cat or rk_is_cat: pass elif is_dtype_equal(lk.dtype, rk.dtype): continue msg = ( f"You are trying to merge on {lk.dtype} and " f"{rk.dtype} columns. If you wish to proceed you should use pd.concat" ) # if we are numeric, then allow differing # kinds to proceed, eg. 
int64 and int8, int and float # further if we are object, but we infer to # the same, then proceed if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype): if lk.dtype.kind == rk.dtype.kind: continue # check whether ints and floats if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype): # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int with np.errstate(invalid="ignore"): # error: Argument 1 to "astype" of "ndarray" has incompatible # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]" casted = lk.astype(rk.dtype) # type: ignore[arg-type] mask = ~np.isnan(lk) match = lk == casted if not match[mask].all(): warnings.warn( "You are merging on int and float " "columns where the float values " "are not equal to their int representation.", UserWarning, stacklevel=find_stack_level(), ) continue if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype): # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int with np.errstate(invalid="ignore"): # error: Argument 1 to "astype" of "ndarray" has incompatible # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]" casted = rk.astype(lk.dtype) # type: ignore[arg-type] mask = ~np.isnan(rk) match = rk == casted if not match[mask].all(): warnings.warn( "You are merging on int and float " "columns where the float values " "are not equal to their int representation.", UserWarning, stacklevel=find_stack_level(), ) continue # let's infer and see if we are ok if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype( rk, skipna=False ): continue # Check if we are trying to merge on obviously # incompatible dtypes GH 9780, GH 15800 # bool values are coerced to object elif (lk_is_object and is_bool_dtype(rk.dtype)) or ( is_bool_dtype(lk.dtype) and rk_is_object ): pass # object values are allowed to be merged elif (lk_is_object and is_numeric_dtype(rk.dtype)) or ( is_numeric_dtype(lk.dtype) and rk_is_object ): inferred_left = lib.infer_dtype(lk, skipna=False) inferred_right = lib.infer_dtype(rk, skipna=False) bool_types = ["integer", "mixed-integer", "boolean", "empty"] string_types = ["string", "unicode", "mixed", "bytes", "empty"] # inferred bool if inferred_left in bool_types and inferred_right in bool_types: pass # unless we are merging non-string-like with string-like elif ( inferred_left in string_types and inferred_right not in string_types ) or ( inferred_right in string_types and inferred_left not in string_types ): raise ValueError(msg) # datetimelikes must match exactly elif needs_i8_conversion(lk.dtype) and not needs_i8_conversion(rk.dtype): raise ValueError(msg) elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype): raise ValueError(msg) elif isinstance(lk.dtype, DatetimeTZDtype) and not isinstance( rk.dtype, DatetimeTZDtype ): raise ValueError(msg) elif not isinstance(lk.dtype, DatetimeTZDtype) and isinstance( rk.dtype, DatetimeTZDtype ): raise ValueError(msg) elif lk_is_object and rk_is_object: continue # Houston, we have a problem! # let's coerce to object if the dtypes aren't # categorical, otherwise coerce to the category # dtype. If we coerced categories to object, # then we would lose type information on some # columns, and end up trying to merge # incompatible dtypes. See GH 16900. 
if name in self.left.columns: typ = cast(Categorical, lk).categories.dtype if lk_is_cat else object self.left = self.left.copy() self.left[name] = self.left[name].astype(typ) if name in self.right.columns: typ = cast(Categorical, rk).categories.dtype if rk_is_cat else object self.right = self.right.copy() self.right[name] = self.right[name].astype(typ) def _create_cross_configuration( self, left: DataFrame, right: DataFrame ) -> tuple[DataFrame, DataFrame, JoinHow, str]: """ Creates the configuration to dispatch the cross operation to inner join, e.g. adding a join column and resetting parameters. Join column is added to a new object, no inplace modification Parameters ---------- left : DataFrame right : DataFrame Returns ------- a tuple (left, right, how, cross_col) representing the adjusted DataFrames with cross_col, the merge operation set to inner and the column to join over. """ cross_col = f"_cross_{uuid.uuid4()}" how: JoinHow = "inner" return ( left.assign(**{cross_col: 1}), right.assign(**{cross_col: 1}), how, cross_col, ) def _validate_left_right_on(self, left_on, right_on): left_on = com.maybe_make_list(left_on) right_on = com.maybe_make_list(right_on) if self.how == "cross": if ( self.left_index or self.right_index or right_on is not None or left_on is not None or self.on is not None ): raise MergeError( "Can not pass on, right_on, left_on or set right_index=True or " "left_index=True" ) # Hm, any way to make this logic less complicated?? elif self.on is None and left_on is None and right_on is None: if self.left_index and self.right_index: left_on, right_on = (), () elif self.left_index: raise MergeError("Must pass right_on or right_index=True") elif self.right_index: raise MergeError("Must pass left_on or left_index=True") else: # use the common columns left_cols = self.left.columns right_cols = self.right.columns common_cols = left_cols.intersection(right_cols) if len(common_cols) == 0: raise MergeError( "No common columns to perform merge on. " f"Merge options: left_on={left_on}, " f"right_on={right_on}, " f"left_index={self.left_index}, " f"right_index={self.right_index}" ) if ( not left_cols.join(common_cols, how="inner").is_unique or not right_cols.join(common_cols, how="inner").is_unique ): raise MergeError(f"Data columns not unique: {repr(common_cols)}") left_on = right_on = common_cols elif self.on is not None: if left_on is not None or right_on is not None: raise MergeError( 'Can only pass argument "on" OR "left_on" ' 'and "right_on", not a combination of both.' ) if self.left_index or self.right_index: raise MergeError( 'Can only pass argument "on" OR "left_index" ' 'and "right_index", not a combination of both.' ) left_on = right_on = self.on elif left_on is not None: if self.left_index: raise MergeError( 'Can only pass argument "left_on" OR "left_index" not both.' ) if not self.right_index and right_on is None: raise MergeError('Must pass "right_on" OR "right_index".') n = len(left_on) if self.right_index: if len(left_on) != self.right.index.nlevels: raise ValueError( "len(left_on) must equal the number " 'of levels in the index of "right"' ) right_on = [None] * n elif right_on is not None: if self.right_index: raise MergeError( 'Can only pass argument "right_on" OR "right_index" not both.' 
) if not self.left_index and left_on is None: raise MergeError('Must pass "left_on" OR "left_index".') n = len(right_on) if self.left_index: if len(right_on) != self.left.index.nlevels: raise ValueError( "len(right_on) must equal the number " 'of levels in the index of "left"' ) left_on = [None] * n if self.how != "cross" and len(right_on) != len(left_on): raise ValueError("len(right_on) must equal len(left_on)") return left_on, right_on def _validate(self, validate: str) -> None: # Check uniqueness of each if self.left_index: left_unique = self.orig_left.index.is_unique else: left_unique = MultiIndex.from_arrays(self.left_join_keys).is_unique if self.right_index: right_unique = self.orig_right.index.is_unique else: right_unique = MultiIndex.from_arrays(self.right_join_keys).is_unique # Check data integrity if validate in ["one_to_one", "1:1"]: if not left_unique and not right_unique: raise MergeError( "Merge keys are not unique in either left " "or right dataset; not a one-to-one merge" ) if not left_unique: raise MergeError( "Merge keys are not unique in left dataset; not a one-to-one merge" ) if not right_unique: raise MergeError( "Merge keys are not unique in right dataset; not a one-to-one merge" ) elif validate in ["one_to_many", "1:m"]: if not left_unique: raise MergeError( "Merge keys are not unique in left dataset; not a one-to-many merge" ) elif validate in ["many_to_one", "m:1"]: if not right_unique: raise MergeError( "Merge keys are not unique in right dataset; " "not a many-to-one merge" ) elif validate in ["many_to_many", "m:m"]: pass else: raise ValueError( f'"{validate}" is not a valid argument. ' "Valid arguments are:\n" '- "1:1"\n' '- "1:m"\n' '- "m:1"\n' '- "m:m"\n' '- "one_to_one"\n' '- "one_to_many"\n' '- "many_to_one"\n' '- "many_to_many"' ) IndexLabel = Union[Hashable, Sequence[Hashable]] Suffixes = Tuple[Optional[str], Optional[str]] MergeHow = Literal["left", "right", "inner", "outer", "cross"] def merge( left: DataFrame | Series, right: DataFrame | Series, how: MergeHow = "inner", on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_index: bool = False, right_index: bool = False, sort: bool = False, suffixes: Suffixes = ("_x", "_y"), copy: bool | None = None, indicator: str | bool = False, validate: str | None = None, ) -> DataFrame: op = _MergeOperation( left, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate, ) return op.get_result(copy=copy)
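A note on the cross-merge dispatch implemented in _create_cross_configuration above: a cross join is reduced to an inner merge on a constant dummy key that is dropped again in _maybe_drop_cross_column. A minimal user-level sketch of that reduction (the "_k" column name is an illustrative placeholder, not the internal uuid-based name):

import pandas as pd

left = pd.DataFrame({"a": [1, 2]})
right = pd.DataFrame({"b": ["x", "y", "z"]})

# Emulate how="cross": add a constant key, inner-merge on it, drop it again.
key = "_k"  # placeholder; the internal code uses a uuid-suffixed column name
manual_cross = (
    left.assign(**{key: 1})
    .merge(right.assign(**{key: 1}), on=key, how="inner")
    .drop(columns=key)
)

# This should line up row for row with the built-in cross merge.
expected = pd.merge(left, right, how="cross")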
null
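For reference, a short usage sketch of the indicator and validate machinery defined above: _indicator_post_merge builds the categorical "_merge" column from the two int8 helper columns, and _validate raises MergeError when the requested key uniqueness does not hold. The frame contents are illustrative only:

import pandas as pd
from pandas.errors import MergeError

left = pd.DataFrame({"key": [1, 2, 3], "lval": ["a", "b", "c"]})
right = pd.DataFrame({"key": [2, 3, 3], "rval": ["x", "y", "z"]})

# indicator=True appends a categorical "_merge" column with the categories
# "left_only", "right_only" and "both".
out = pd.merge(left, right, on="key", how="outer", indicator=True)
print(out["_merge"].cat.categories.tolist())

# validate="one_to_one" checks key uniqueness on both sides; the duplicate
# key 3 on the right side makes this raise a MergeError.
try:
    pd.merge(left, right, on="key", validate="one_to_one")
except MergeError as err:
    print(err)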
173,130
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces): """ groupby & merge; we are always performing a left-by type operation Parameters ---------- by: field to group left: DataFrame right: DataFrame merge_pieces: function for merging """ pieces = [] if not isinstance(by, (list, tuple)): by = [by] lby = left.groupby(by, sort=False) rby: groupby.DataFrameGroupBy | None = None # if we can groupby the rhs # then we can get vastly better perf if all(item in right.columns for item in by): rby = right.groupby(by, sort=False) for key, lhs in lby.grouper.get_iterator(lby._selected_obj, axis=lby.axis): if rby is None: rhs = right else: try: rhs = right.take(rby.indices[key]) except KeyError: # key doesn't exist in left lcols = lhs.columns.tolist() cols = lcols + [r for r in right.columns if r not in set(lcols)] merged = lhs.reindex(columns=cols) merged.index = range(len(merged)) pieces.append(merged) continue merged = merge_pieces(lhs, rhs) # make sure join keys are in the merged # TODO, should merge_pieces do this? 
merged[by] = key pieces.append(merged) # preserve the original order # if we have a missing piece this can be reset from pandas.core.reshape.concat import concat result = concat(pieces, ignore_index=True) result = result.reindex(columns=pieces[0].columns, copy=False) return result, lby class _OrderedMerge(_MergeOperation): _merge_type = "ordered_merge" def __init__( self, left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_index: bool = False, right_index: bool = False, axis: AxisInt = 1, suffixes: Suffixes = ("_x", "_y"), fill_method: str | None = None, how: JoinHow | Literal["asof"] = "outer", ) -> None: self.fill_method = fill_method _MergeOperation.__init__( self, left, right, on=on, left_on=left_on, left_index=left_index, right_index=right_index, right_on=right_on, axis=axis, how=how, suffixes=suffixes, sort=True, # factorize sorts ) def get_result(self, copy: bool | None = True) -> DataFrame: join_index, left_indexer, right_indexer = self._get_join_info() llabels, rlabels = _items_overlap_with_suffix( self.left._info_axis, self.right._info_axis, self.suffixes ) left_join_indexer: np.ndarray | None right_join_indexer: np.ndarray | None if self.fill_method == "ffill": if left_indexer is None: raise TypeError("left_indexer cannot be None") left_indexer, right_indexer = cast(np.ndarray, left_indexer), cast( np.ndarray, right_indexer ) left_join_indexer = libjoin.ffill_indexer(left_indexer) right_join_indexer = libjoin.ffill_indexer(right_indexer) else: left_join_indexer = left_indexer right_join_indexer = right_indexer result = self._reindex_and_concat( join_index, left_join_indexer, right_join_indexer, copy=copy ) self._maybe_add_join_keys(result, left_indexer, right_indexer) return result IndexLabel = Union[Hashable, Sequence[Hashable]] Suffixes = Tuple[Optional[str], Optional[str]] JoinHow = Literal["left", "right", "inner", "outer"] The provided code snippet includes necessary dependencies for implementing the `merge_ordered` function. Write a Python function `def merge_ordered( left: DataFrame, right: DataFrame, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_by=None, right_by=None, fill_method: str | None = None, suffixes: Suffixes = ("_x", "_y"), how: JoinHow = "outer", ) -> DataFrame` to solve the following problem: Perform a merge for ordered data with optional filling/interpolation. Designed for ordered data like time series data. Optionally perform group-wise merge (see examples). Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label or list Field names to join on. Must be found in both DataFrames. left_on : label or list, or array-like Field names to join on in left DataFrame. Can be a vector or list of vectors of the length of the DataFrame to use a particular vector as the join key instead of columns. right_on : label or list, or array-like Field names to join on in right DataFrame or vector/list of vectors per left_on docs. left_by : column name or list of column names Group left DataFrame by group columns and merge piece by piece with right DataFrame. Must be None if either left or right are a Series. right_by : column name or list of column names Group right DataFrame by group columns and merge piece by piece with left DataFrame. Must be None if either left or right are a Series. fill_method : {'ffill', None}, default None Interpolation method for data. 
suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. how : {'left', 'right', 'outer', 'inner'}, default 'outer' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) * outer: use union of keys from both frames (SQL: full outer join) * inner: use intersection of keys from both frames (SQL: inner join). Returns ------- DataFrame The merged DataFrame output type will be the same as 'left', if it is a subclass of DataFrame. See Also -------- merge : Merge with a database-style join. merge_asof : Merge on nearest keys. Examples -------- >>> from pandas import merge_ordered >>> df1 = pd.DataFrame( ... { ... "key": ["a", "c", "e", "a", "c", "e"], ... "lvalue": [1, 2, 3, 1, 2, 3], ... "group": ["a", "a", "a", "b", "b", "b"] ... } ... ) >>> df1 key lvalue group 0 a 1 a 1 c 2 a 2 e 3 a 3 a 1 b 4 c 2 b 5 e 3 b >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) >>> df2 key rvalue 0 b 1 1 c 2 2 d 3 >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group") key lvalue group rvalue 0 a 1 a NaN 1 b 1 a 1.0 2 c 2 a 2.0 3 d 2 a 3.0 4 e 3 a 3.0 5 a 1 b NaN 6 b 1 b 1.0 7 c 2 b 2.0 8 d 2 b 3.0 9 e 3 b 3.0 Here is the function: def merge_ordered( left: DataFrame, right: DataFrame, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_by=None, right_by=None, fill_method: str | None = None, suffixes: Suffixes = ("_x", "_y"), how: JoinHow = "outer", ) -> DataFrame: """ Perform a merge for ordered data with optional filling/interpolation. Designed for ordered data like time series data. Optionally perform group-wise merge (see examples). Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label or list Field names to join on. Must be found in both DataFrames. left_on : label or list, or array-like Field names to join on in left DataFrame. Can be a vector or list of vectors of the length of the DataFrame to use a particular vector as the join key instead of columns. right_on : label or list, or array-like Field names to join on in right DataFrame or vector/list of vectors per left_on docs. left_by : column name or list of column names Group left DataFrame by group columns and merge piece by piece with right DataFrame. Must be None if either left or right are a Series. right_by : column name or list of column names Group right DataFrame by group columns and merge piece by piece with left DataFrame. Must be None if either left or right are a Series. fill_method : {'ffill', None}, default None Interpolation method for data. suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. 
how : {'left', 'right', 'outer', 'inner'}, default 'outer' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) * outer: use union of keys from both frames (SQL: full outer join) * inner: use intersection of keys from both frames (SQL: inner join). Returns ------- DataFrame The merged DataFrame output type will be the same as 'left', if it is a subclass of DataFrame. See Also -------- merge : Merge with a database-style join. merge_asof : Merge on nearest keys. Examples -------- >>> from pandas import merge_ordered >>> df1 = pd.DataFrame( ... { ... "key": ["a", "c", "e", "a", "c", "e"], ... "lvalue": [1, 2, 3, 1, 2, 3], ... "group": ["a", "a", "a", "b", "b", "b"] ... } ... ) >>> df1 key lvalue group 0 a 1 a 1 c 2 a 2 e 3 a 3 a 1 b 4 c 2 b 5 e 3 b >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) >>> df2 key rvalue 0 b 1 1 c 2 2 d 3 >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group") key lvalue group rvalue 0 a 1 a NaN 1 b 1 a 1.0 2 c 2 a 2.0 3 d 2 a 3.0 4 e 3 a 3.0 5 a 1 b NaN 6 b 1 b 1.0 7 c 2 b 2.0 8 d 2 b 3.0 9 e 3 b 3.0 """ def _merger(x, y) -> DataFrame: # perform the ordered merge operation op = _OrderedMerge( x, y, on=on, left_on=left_on, right_on=right_on, suffixes=suffixes, fill_method=fill_method, how=how, ) return op.get_result() if left_by is not None and right_by is not None: raise ValueError("Can only group either left or right frames") if left_by is not None: if isinstance(left_by, str): left_by = [left_by] check = set(left_by).difference(left.columns) if len(check) != 0: raise KeyError(f"{check} not found in left columns") result, _ = _groupby_and_merge(left_by, left, right, lambda x, y: _merger(x, y)) elif right_by is not None: if isinstance(right_by, str): right_by = [right_by] check = set(right_by).difference(right.columns) if len(check) != 0: raise KeyError(f"{check} not found in right columns") result, _ = _groupby_and_merge( right_by, right, left, lambda x, y: _merger(y, x) ) else: result = _merger(left, right) return result
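The fill_method branch handled in _OrderedMerge.get_result only knows "ffill" (via libjoin.ffill_indexer) or None; a quick contrast of the two accepted values, reusing data shaped like the docstring example:

import pandas as pd

df1 = pd.DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2, 3]})
df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})

# fill_method=None (the default): unmatched positions stay missing.
print(pd.merge_ordered(df1, df2, on="key"))

# fill_method="ffill": the join indexers are forward-filled, so each row
# reuses the most recent prior match where one exists (the very first row
# still has nothing to fill from).
print(pd.merge_ordered(df1, df2, on="key", fill_method="ffill"))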
Perform a merge for ordered data with optional filling/interpolation. Designed for ordered data like time series data. Optionally perform group-wise merge (see examples). Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label or list Field names to join on. Must be found in both DataFrames. left_on : label or list, or array-like Field names to join on in left DataFrame. Can be a vector or list of vectors of the length of the DataFrame to use a particular vector as the join key instead of columns. right_on : label or list, or array-like Field names to join on in right DataFrame or vector/list of vectors per left_on docs. left_by : column name or list of column names Group left DataFrame by group columns and merge piece by piece with right DataFrame. Must be None if either left or right are a Series. right_by : column name or list of column names Group right DataFrame by group columns and merge piece by piece with left DataFrame. Must be None if either left or right are a Series. fill_method : {'ffill', None}, default None Interpolation method for data. suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. how : {'left', 'right', 'outer', 'inner'}, default 'outer' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) * outer: use union of keys from both frames (SQL: full outer join) * inner: use intersection of keys from both frames (SQL: inner join). Returns ------- DataFrame The merged DataFrame output type will be the same as 'left', if it is a subclass of DataFrame. See Also -------- merge : Merge with a database-style join. merge_asof : Merge on nearest keys. Examples -------- >>> from pandas import merge_ordered >>> df1 = pd.DataFrame( ... { ... "key": ["a", "c", "e", "a", "c", "e"], ... "lvalue": [1, 2, 3, 1, 2, 3], ... "group": ["a", "a", "a", "b", "b", "b"] ... } ... ) >>> df1 key lvalue group 0 a 1 a 1 c 2 a 2 e 3 a 3 a 1 b 4 c 2 b 5 e 3 b >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) >>> df2 key rvalue 0 b 1 1 c 2 2 d 3 >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group") key lvalue group rvalue 0 a 1 a NaN 1 b 1 a 1.0 2 c 2 a 2.0 3 d 2 a 3.0 4 e 3 a 3.0 5 a 1 b NaN 6 b 1 b 1.0 7 c 2 b 2.0 8 d 2 b 3.0 9 e 3 b 3.0
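The left_by path above is delegated to _groupby_and_merge, which always performs a left-by style operation: each left group is merged with the full right frame and the pieces are concatenated in the original group order. A rough user-level paraphrase of that mechanism (not the exact internal code path):

import pandas as pd

df1 = pd.DataFrame(
    {
        "key": ["a", "c", "e", "a", "c", "e"],
        "lvalue": [1, 2, 3, 1, 2, 3],
        "group": ["a", "a", "a", "b", "b", "b"],
    }
)
df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})

# Roughly what merge_ordered(..., left_by="group") does: merge every "group"
# slice of df1 with df2 separately, then concatenate the pieces.
pieces = [
    pd.merge_ordered(part, df2, on="key", fill_method="ffill").assign(group=name)
    for name, part in df1.groupby("group", sort=False)
]
manual = pd.concat(pieces, ignore_index=True)

direct = pd.merge_ordered(df1, df2, on="key", fill_method="ffill", left_by="group")
# For this data the two frames should line up row for row.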
173,131
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible class _AsOfMerge(_OrderedMerge): _merge_type = "asof_merge" def __init__( self, left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_index: bool = False, right_index: bool = False, by=None, left_by=None, right_by=None, axis: AxisInt = 1, suffixes: Suffixes = ("_x", "_y"), copy: bool = True, fill_method: str | None = None, how: Literal["asof"] = "asof", tolerance=None, allow_exact_matches: bool = True, direction: str = "backward", ) -> None: self.by = by self.left_by = left_by self.right_by = right_by self.tolerance = tolerance self.allow_exact_matches = allow_exact_matches self.direction = direction _OrderedMerge.__init__( self, left, right, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, axis=axis, how=how, suffixes=suffixes, fill_method=fill_method, ) def _validate_left_right_on(self, left_on, right_on): left_on, right_on = super()._validate_left_right_on(left_on, right_on) # we only allow on to be a single item for on if len(left_on) != 1 and not self.left_index: raise MergeError("can only asof on a key for left") if len(right_on) != 1 and not self.right_index: raise MergeError("can only asof on a key for right") if self.left_index and isinstance(self.left.index, MultiIndex): raise MergeError("left can only have one index") if self.right_index and isinstance(self.right.index, MultiIndex): raise MergeError("right can only have one index") # set 'by' columns if self.by is not None: if self.left_by is not None or self.right_by is not None: raise MergeError("Can only pass by OR left_by and right_by") self.left_by = self.right_by = self.by if self.left_by is None and self.right_by is not None: 
raise MergeError("missing left_by") if self.left_by is not None and self.right_by is None: raise MergeError("missing right_by") # GH#29130 Check that merge keys do not have dtype object if not self.left_index: left_on_0 = left_on[0] if is_array_like(left_on_0): lo_dtype = left_on_0.dtype else: lo_dtype = ( self.left._get_label_or_level_values(left_on_0).dtype if left_on_0 in self.left.columns else self.left.index.get_level_values(left_on_0) ) else: lo_dtype = self.left.index.dtype if not self.right_index: right_on_0 = right_on[0] if is_array_like(right_on_0): ro_dtype = right_on_0.dtype else: ro_dtype = ( self.right._get_label_or_level_values(right_on_0).dtype if right_on_0 in self.right.columns else self.right.index.get_level_values(right_on_0) ) else: ro_dtype = self.right.index.dtype if is_object_dtype(lo_dtype) or is_object_dtype(ro_dtype): raise MergeError( f"Incompatible merge dtype, {repr(ro_dtype)} and " f"{repr(lo_dtype)}, both sides must have numeric dtype" ) # add 'by' to our key-list so we can have it in the # output as a key if self.left_by is not None: if not is_list_like(self.left_by): self.left_by = [self.left_by] if not is_list_like(self.right_by): self.right_by = [self.right_by] if len(self.left_by) != len(self.right_by): raise MergeError("left_by and right_by must be same length") left_on = self.left_by + list(left_on) right_on = self.right_by + list(right_on) # check 'direction' is valid if self.direction not in ["backward", "forward", "nearest"]: raise MergeError(f"direction invalid: {self.direction}") return left_on, right_on def _get_merge_keys( self, ) -> tuple[list[AnyArrayLike], list[AnyArrayLike], list[Hashable]]: # note this function has side effects (left_join_keys, right_join_keys, join_names) = super()._get_merge_keys() # validate index types are the same for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): if not is_dtype_equal(lk.dtype, rk.dtype): if is_categorical_dtype(lk.dtype) and is_categorical_dtype(rk.dtype): # The generic error message is confusing for categoricals. # # In this function, the join keys include both the original # ones of the merge_asof() call, and also the keys passed # to its by= argument. Unordered but equal categories # are not supported for the former, but will fail # later with a ValueError, so we don't *need* to check # for them here. 
msg = ( f"incompatible merge keys [{i}] {repr(lk.dtype)} and " f"{repr(rk.dtype)}, both sides category, but not equal ones" ) else: msg = ( f"incompatible merge keys [{i}] {repr(lk.dtype)} and " f"{repr(rk.dtype)}, must be the same type" ) raise MergeError(msg) # validate tolerance; datetime.timedelta or Timedelta if we have a DTI if self.tolerance is not None: if self.left_index: # Actually more specifically an Index lt = cast(AnyArrayLike, self.left.index) else: lt = left_join_keys[-1] msg = ( f"incompatible tolerance {self.tolerance}, must be compat " f"with type {repr(lt.dtype)}" ) if needs_i8_conversion(lt): if not isinstance(self.tolerance, datetime.timedelta): raise MergeError(msg) if self.tolerance < Timedelta(0): raise MergeError("tolerance must be positive") elif is_integer_dtype(lt): if not is_integer(self.tolerance): raise MergeError(msg) if self.tolerance < 0: raise MergeError("tolerance must be positive") elif is_float_dtype(lt): if not is_number(self.tolerance): raise MergeError(msg) if self.tolerance < 0: raise MergeError("tolerance must be positive") else: raise MergeError("key must be integer, timestamp or float") # validate allow_exact_matches if not is_bool(self.allow_exact_matches): msg = ( "allow_exact_matches must be boolean, " f"passed {self.allow_exact_matches}" ) raise MergeError(msg) return left_join_keys, right_join_keys, join_names def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """return the join indexers""" def flip(xs) -> np.ndarray: """unlike np.transpose, this returns an array of tuples""" def injection(obj): if not is_extension_array_dtype(obj): # ndarray return obj obj = extract_array(obj) if isinstance(obj, NDArrayBackedExtensionArray): # fastpath for e.g. dt64tz, categorical return obj._ndarray # FIXME: returning obj._values_for_argsort() here doesn't # break in any existing test cases, but i (@jbrockmendel) # am pretty sure it should! # e.g. # arr = pd.array([0, pd.NA, 255], dtype="UInt8") # will have values_for_argsort (before GH#45434) # np.array([0, 255, 255], dtype=np.uint8) # and the non-injectivity should make a difference somehow # shouldn't it? return np.asarray(obj) xs = [injection(x) for x in xs] labels = list(string.ascii_lowercase[: len(xs)]) dtypes = [x.dtype for x in xs] labeled_dtypes = list(zip(labels, dtypes)) return np.array(list(zip(*xs)), labeled_dtypes) # values to compare left_values = ( self.left.index._values if self.left_index else self.left_join_keys[-1] ) right_values = ( self.right.index._values if self.right_index else self.right_join_keys[-1] ) tolerance = self.tolerance # we require sortedness and non-null values in the join keys if not Index(left_values).is_monotonic_increasing: side = "left" if isna(left_values).any(): raise ValueError(f"Merge keys contain null values on {side} side") raise ValueError(f"{side} keys must be sorted") if not Index(right_values).is_monotonic_increasing: side = "right" if isna(right_values).any(): raise ValueError(f"Merge keys contain null values on {side} side") raise ValueError(f"{side} keys must be sorted") # initial type conversion as needed if needs_i8_conversion(left_values): if tolerance is not None: tolerance = Timedelta(tolerance) # TODO: we have no test cases with PeriodDtype here; probably # need to adjust tolerance for that case. if left_values.dtype.kind in ["m", "M"]: # Make sure the i8 representation for tolerance # matches that for left_values/right_values. 
lvs = ensure_wrapped_if_datetimelike(left_values) tolerance = tolerance.as_unit(lvs.unit) tolerance = tolerance._value # TODO: require left_values.dtype == right_values.dtype, or at least # comparable for e.g. dt64tz left_values = left_values.view("i8") right_values = right_values.view("i8") # a "by" parameter requires special handling if self.left_by is not None: # remove 'on' parameter from values if one existed if self.left_index and self.right_index: left_by_values = self.left_join_keys right_by_values = self.right_join_keys else: left_by_values = self.left_join_keys[0:-1] right_by_values = self.right_join_keys[0:-1] # get tuple representation of values if more than one if len(left_by_values) == 1: lbv = left_by_values[0] rbv = right_by_values[0] else: # We get here with non-ndarrays in test_merge_by_col_tz_aware # and test_merge_groupby_multiple_column_with_categorical_column lbv = flip(left_by_values) rbv = flip(right_by_values) # upcast 'by' parameter because HashTable is limited by_type = _get_cython_type_upcast(lbv.dtype) by_type_caster = _type_casters[by_type] # error: Incompatible types in assignment (expression has type # "ndarray[Any, dtype[generic]]", variable has type # "List[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series]]") left_by_values = by_type_caster(lbv) # type: ignore[assignment] # error: Incompatible types in assignment (expression has type # "ndarray[Any, dtype[generic]]", variable has type # "List[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series]]") right_by_values = by_type_caster(rbv) # type: ignore[assignment] # choose appropriate function by type func = _asof_by_function(self.direction) return func( left_values, right_values, left_by_values, right_by_values, self.allow_exact_matches, tolerance, ) else: # choose appropriate function by type func = _asof_by_function(self.direction) # TODO(cython3): # Bug in beta1 preventing Cython from choosing # right specialization when one fused memview is None # Doesn't matter what type we choose # (nothing happens anyways since it is None) # GH 51640 return func[f"{left_values.dtype}_t", object]( left_values, right_values, None, None, self.allow_exact_matches, tolerance, False, ) IndexLabel = Union[Hashable, Sequence[Hashable]] Suffixes = Tuple[Optional[str], Optional[str]] The provided code snippet includes necessary dependencies for implementing the `merge_asof` function. Write a Python function `def merge_asof( left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_index: bool = False, right_index: bool = False, by=None, left_by=None, right_by=None, suffixes: Suffixes = ("_x", "_y"), tolerance=None, allow_exact_matches: bool = True, direction: str = "backward", ) -> DataFrame` to solve the following problem: Perform a merge by key distance. This is similar to a left-join except that we match on nearest key rather than equal keys. Both DataFrames must be sorted by the key. For each row in the left DataFrame: - A "backward" search selects the last row in the right DataFrame whose 'on' key is less than or equal to the left's key. - A "forward" search selects the first row in the right DataFrame whose 'on' key is greater than or equal to the left's key. - A "nearest" search selects the row in the right DataFrame whose 'on' key is closest in absolute distance to the left's key. The default is "backward" and is compatible in versions below 0.20.0. 
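For orientation, a conceptual NumPy sketch of what the direction-dispatched libjoin routines selected in _get_join_indexers compute in the simplest case (no by grouping, no tolerance); this is an illustration only, not the actual Cython implementation:

import numpy as np

left_values = np.array([1, 5, 10])        # must already be sorted, as enforced above
right_values = np.array([1, 2, 3, 6, 7])  # must already be sorted as well

# direction="backward", allow_exact_matches=True: for each left key, take the
# last right position whose key is <= the left key; -1 means "no match".
backward = np.searchsorted(right_values, left_values, side="right") - 1
# -> [0, 2, 4], i.e. right keys 1, 3 and 7

# direction="forward", allow_exact_matches=True: first right position whose key
# is >= the left key; a result equal to len(right_values) means "no match".
forward = np.searchsorted(right_values, left_values, side="left")
# -> [0, 3, 5], i.e. right keys 1, 6 and then no match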
The direction parameter was added in version 0.20.0 and introduces "forward" and "nearest". Optionally match on equivalent keys with 'by' before searching with 'on'. Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label Field name to join on. Must be found in both DataFrames. The data MUST be ordered. Furthermore this must be a numeric column, such as datetimelike, integer, or float. On or left_on/right_on must be given. left_on : label Field name to join on in left DataFrame. right_on : label Field name to join on in right DataFrame. left_index : bool Use the index of the left DataFrame as the join key. right_index : bool Use the index of the right DataFrame as the join key. by : column name or list of column names Match on these columns before performing merge operation. left_by : column name Field names to match on in the left DataFrame. right_by : column name Field names to match on in the right DataFrame. suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively. tolerance : int or Timedelta, optional, default None Select asof tolerance within this range; must be compatible with the merge index. allow_exact_matches : bool, default True - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value (i.e., strictly less-than / strictly greater-than). direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. Returns ------- DataFrame See Also -------- merge : Merge with a database-style join. merge_ordered : Merge with optional filling/interpolation. Examples -------- >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) >>> left a left_val 0 1 a 1 5 b 2 10 c >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) >>> right a right_val 0 1 1 1 2 2 2 3 3 3 6 6 4 7 7 >>> pd.merge_asof(left, right, on="a") a left_val right_val 0 1 a 1 1 5 b 3 2 10 c 7 >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False) a left_val right_val 0 1 a NaN 1 5 b 3.0 2 10 c 7.0 >>> pd.merge_asof(left, right, on="a", direction="forward") a left_val right_val 0 1 a 1.0 1 5 b 6.0 2 10 c NaN >>> pd.merge_asof(left, right, on="a", direction="nearest") a left_val right_val 0 1 a 1 1 5 b 6 2 10 c 7 We can use indexed DataFrames as well. >>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10]) >>> left left_val 1 a 5 b 10 c >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) >>> right right_val 1 1 2 2 3 3 6 6 7 7 >>> pd.merge_asof(left, right, left_index=True, right_index=True) left_val right_val 1 a 1 5 b 3 10 c 7 Here is a real-world times-series example >>> quotes = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.030"), ... pd.Timestamp("2016-05-25 13:30:00.041"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.049"), ... pd.Timestamp("2016-05-25 13:30:00.072"), ... pd.Timestamp("2016-05-25 13:30:00.075") ... ], ... "ticker": [ ... "GOOG", ... "MSFT", ... "MSFT", ... "MSFT", ... "GOOG", ... "AAPL", ... "GOOG", ... "MSFT" ... ], ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03] ... } ... 
) >>> quotes time ticker bid ask 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 >>> trades = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.038"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048") ... ], ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], ... "quantity": [75, 155, 100, 100, 100] ... } ... ) >>> trades time ticker price quantity 0 2016-05-25 13:30:00.023 MSFT 51.95 75 1 2016-05-25 13:30:00.038 MSFT 51.95 155 2 2016-05-25 13:30:00.048 GOOG 720.77 100 3 2016-05-25 13:30:00.048 GOOG 720.92 100 4 2016-05-25 13:30:00.048 AAPL 98.00 100 By default we are taking the asof of the quotes >>> pd.merge_asof(trades, quotes, on="time", by="ticker") time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 2ms between the quote time and the trade time >>> pd.merge_asof( ... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms") ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 10ms between the quote time and the trade time and we exclude exact matches on time. However *prior* data will propagate forward >>> pd.merge_asof( ... trades, ... quotes, ... on="time", ... by="ticker", ... tolerance=pd.Timedelta("10ms"), ... allow_exact_matches=False ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN Here is the function: def merge_asof( left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_index: bool = False, right_index: bool = False, by=None, left_by=None, right_by=None, suffixes: Suffixes = ("_x", "_y"), tolerance=None, allow_exact_matches: bool = True, direction: str = "backward", ) -> DataFrame: """ Perform a merge by key distance. This is similar to a left-join except that we match on nearest key rather than equal keys. Both DataFrames must be sorted by the key. For each row in the left DataFrame: - A "backward" search selects the last row in the right DataFrame whose 'on' key is less than or equal to the left's key. - A "forward" search selects the first row in the right DataFrame whose 'on' key is greater than or equal to the left's key. - A "nearest" search selects the row in the right DataFrame whose 'on' key is closest in absolute distance to the left's key. 
The default is "backward" and is compatible in versions below 0.20.0. The direction parameter was added in version 0.20.0 and introduces "forward" and "nearest". Optionally match on equivalent keys with 'by' before searching with 'on'. Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label Field name to join on. Must be found in both DataFrames. The data MUST be ordered. Furthermore this must be a numeric column, such as datetimelike, integer, or float. On or left_on/right_on must be given. left_on : label Field name to join on in left DataFrame. right_on : label Field name to join on in right DataFrame. left_index : bool Use the index of the left DataFrame as the join key. right_index : bool Use the index of the right DataFrame as the join key. by : column name or list of column names Match on these columns before performing merge operation. left_by : column name Field names to match on in the left DataFrame. right_by : column name Field names to match on in the right DataFrame. suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively. tolerance : int or Timedelta, optional, default None Select asof tolerance within this range; must be compatible with the merge index. allow_exact_matches : bool, default True - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value (i.e., strictly less-than / strictly greater-than). direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. Returns ------- DataFrame See Also -------- merge : Merge with a database-style join. merge_ordered : Merge with optional filling/interpolation. Examples -------- >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) >>> left a left_val 0 1 a 1 5 b 2 10 c >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) >>> right a right_val 0 1 1 1 2 2 2 3 3 3 6 6 4 7 7 >>> pd.merge_asof(left, right, on="a") a left_val right_val 0 1 a 1 1 5 b 3 2 10 c 7 >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False) a left_val right_val 0 1 a NaN 1 5 b 3.0 2 10 c 7.0 >>> pd.merge_asof(left, right, on="a", direction="forward") a left_val right_val 0 1 a 1.0 1 5 b 6.0 2 10 c NaN >>> pd.merge_asof(left, right, on="a", direction="nearest") a left_val right_val 0 1 a 1 1 5 b 6 2 10 c 7 We can use indexed DataFrames as well. >>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10]) >>> left left_val 1 a 5 b 10 c >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) >>> right right_val 1 1 2 2 3 3 6 6 7 7 >>> pd.merge_asof(left, right, left_index=True, right_index=True) left_val right_val 1 a 1 5 b 3 10 c 7 Here is a real-world times-series example >>> quotes = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.030"), ... pd.Timestamp("2016-05-25 13:30:00.041"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.049"), ... pd.Timestamp("2016-05-25 13:30:00.072"), ... pd.Timestamp("2016-05-25 13:30:00.075") ... ], ... "ticker": [ ... "GOOG", ... "MSFT", ... "MSFT", ... "MSFT", ... "GOOG", ... "AAPL", ... "GOOG", ... "MSFT" ... ], ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], ... 
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03] ... } ... ) >>> quotes time ticker bid ask 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 >>> trades = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.038"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048") ... ], ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], ... "quantity": [75, 155, 100, 100, 100] ... } ... ) >>> trades time ticker price quantity 0 2016-05-25 13:30:00.023 MSFT 51.95 75 1 2016-05-25 13:30:00.038 MSFT 51.95 155 2 2016-05-25 13:30:00.048 GOOG 720.77 100 3 2016-05-25 13:30:00.048 GOOG 720.92 100 4 2016-05-25 13:30:00.048 AAPL 98.00 100 By default we are taking the asof of the quotes >>> pd.merge_asof(trades, quotes, on="time", by="ticker") time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 2ms between the quote time and the trade time >>> pd.merge_asof( ... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms") ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 10ms between the quote time and the trade time and we exclude exact matches on time. However *prior* data will propagate forward >>> pd.merge_asof( ... trades, ... quotes, ... on="time", ... by="ticker", ... tolerance=pd.Timedelta("10ms"), ... allow_exact_matches=False ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN """ op = _AsOfMerge( left, right, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, by=by, left_by=left_by, right_by=right_by, suffixes=suffixes, how="asof", tolerance=tolerance, allow_exact_matches=allow_exact_matches, direction=direction, ) return op.get_result()
Perform a merge by key distance. This is similar to a left-join except that we match on nearest key rather than equal keys. Both DataFrames must be sorted by the key. For each row in the left DataFrame: - A "backward" search selects the last row in the right DataFrame whose 'on' key is less than or equal to the left's key. - A "forward" search selects the first row in the right DataFrame whose 'on' key is greater than or equal to the left's key. - A "nearest" search selects the row in the right DataFrame whose 'on' key is closest in absolute distance to the left's key. The default is "backward" and is compatible in versions below 0.20.0. The direction parameter was added in version 0.20.0 and introduces "forward" and "nearest". Optionally match on equivalent keys with 'by' before searching with 'on'. Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label Field name to join on. Must be found in both DataFrames. The data MUST be ordered. Furthermore this must be a numeric column, such as datetimelike, integer, or float. On or left_on/right_on must be given. left_on : label Field name to join on in left DataFrame. right_on : label Field name to join on in right DataFrame. left_index : bool Use the index of the left DataFrame as the join key. right_index : bool Use the index of the right DataFrame as the join key. by : column name or list of column names Match on these columns before performing merge operation. left_by : column name Field names to match on in the left DataFrame. right_by : column name Field names to match on in the right DataFrame. suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively. tolerance : int or Timedelta, optional, default None Select asof tolerance within this range; must be compatible with the merge index. allow_exact_matches : bool, default True - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value (i.e., strictly less-than / strictly greater-than). direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. Returns ------- DataFrame See Also -------- merge : Merge with a database-style join. merge_ordered : Merge with optional filling/interpolation. Examples -------- >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) >>> left a left_val 0 1 a 1 5 b 2 10 c >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) >>> right a right_val 0 1 1 1 2 2 2 3 3 3 6 6 4 7 7 >>> pd.merge_asof(left, right, on="a") a left_val right_val 0 1 a 1 1 5 b 3 2 10 c 7 >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False) a left_val right_val 0 1 a NaN 1 5 b 3.0 2 10 c 7.0 >>> pd.merge_asof(left, right, on="a", direction="forward") a left_val right_val 0 1 a 1.0 1 5 b 6.0 2 10 c NaN >>> pd.merge_asof(left, right, on="a", direction="nearest") a left_val right_val 0 1 a 1 1 5 b 6 2 10 c 7 We can use indexed DataFrames as well. >>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10]) >>> left left_val 1 a 5 b 10 c >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) >>> right right_val 1 1 2 2 3 3 6 6 7 7 >>> pd.merge_asof(left, right, left_index=True, right_index=True) left_val right_val 1 a 1 5 b 3 10 c 7 Here is a real-world times-series example >>> quotes = pd.DataFrame( ... { ... "time": [ ... 
pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.030"), ... pd.Timestamp("2016-05-25 13:30:00.041"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.049"), ... pd.Timestamp("2016-05-25 13:30:00.072"), ... pd.Timestamp("2016-05-25 13:30:00.075") ... ], ... "ticker": [ ... "GOOG", ... "MSFT", ... "MSFT", ... "MSFT", ... "GOOG", ... "AAPL", ... "GOOG", ... "MSFT" ... ], ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03] ... } ... ) >>> quotes time ticker bid ask 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 >>> trades = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.038"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048") ... ], ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], ... "quantity": [75, 155, 100, 100, 100] ... } ... ) >>> trades time ticker price quantity 0 2016-05-25 13:30:00.023 MSFT 51.95 75 1 2016-05-25 13:30:00.038 MSFT 51.95 155 2 2016-05-25 13:30:00.048 GOOG 720.77 100 3 2016-05-25 13:30:00.048 GOOG 720.92 100 4 2016-05-25 13:30:00.048 AAPL 98.00 100 By default we are taking the asof of the quotes >>> pd.merge_asof(trades, quotes, on="time", by="ticker") time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 2ms between the quote time and the trade time >>> pd.merge_asof( ... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms") ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 10ms between the quote time and the trade time and we exclude exact matches on time. However *prior* data will propagate forward >>> pd.merge_asof( ... trades, ... quotes, ... on="time", ... by="ticker", ... tolerance=pd.Timedelta("10ms"), ... allow_exact_matches=False ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
173,132
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible def _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """Return empty join indexers.""" return ( np.array([], dtype=np.intp), np.array([], dtype=np.intp), ) def _get_no_sort_one_missing_indexer( n: int, left_missing: bool ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Return join indexers where all of one side is selected without sorting and none of the other side is selected. Parameters ---------- n : int Length of indexers to create. left_missing : bool If True, the left indexer will contain only -1's. If False, the right indexer will contain only -1's. Returns ------- np.ndarray[np.intp] Left indexer np.ndarray[np.intp] Right indexer """ idx = np.arange(n, dtype=np.intp) idx_missing = np.full(shape=n, fill_value=-1, dtype=np.intp) if left_missing: return idx_missing, idx return idx, idx_missing def _factorize_keys( lk: ArrayLike, rk: ArrayLike, sort: bool = True, how: MergeHow | Literal["asof"] = "inner", ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: """ Encode left and right keys as enumerated types. This is used to get the join indexers to be used when merging DataFrames. Parameters ---------- lk : array-like Left key. rk : array-like Right key. sort : bool, defaults to True If True, the encoding is done such that the unique elements in the keys are sorted. how : {‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’ Type of merge. Returns ------- np.ndarray[np.intp] Left (resp. right if called with `key='right'`) labels, as enumerated type. np.ndarray[np.intp] Right (resp. left if called with `key='right'`) labels, as enumerated type. int Number of unique elements in union of left and right labels. 
See Also -------- merge : Merge DataFrame or named Series objects with a database-style join. algorithms.factorize : Encode the object as an enumerated type or categorical variable. Examples -------- >>> lk = np.array(["a", "c", "b"]) >>> rk = np.array(["a", "c"]) Here, the unique values are `'a', 'b', 'c'`. With the default `sort=True`, the encoding will be `{0: 'a', 1: 'b', 2: 'c'}`: >>> pd.core.reshape.merge._factorize_keys(lk, rk) (array([0, 2, 1]), array([0, 2]), 3) With the `sort=False`, the encoding will correspond to the order in which the unique elements first appear: `{0: 'a', 1: 'c', 2: 'b'}`: >>> pd.core.reshape.merge._factorize_keys(lk, rk, sort=False) (array([0, 1, 2]), array([0, 1]), 3) """ # Some pre-processing for non-ndarray lk / rk lk = extract_array(lk, extract_numpy=True, extract_range=True) rk = extract_array(rk, extract_numpy=True, extract_range=True) # TODO: if either is a RangeIndex, we can likely factorize more efficiently? if isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype): # Extract the ndarray (UTC-localized) values # Note: we dont need the dtypes to match, as these can still be compared # TODO(non-nano): need to make sure resolutions match lk = cast("DatetimeArray", lk)._ndarray rk = cast("DatetimeArray", rk)._ndarray elif ( is_categorical_dtype(lk.dtype) and is_categorical_dtype(rk.dtype) and is_dtype_equal(lk.dtype, rk.dtype) ): assert isinstance(lk, Categorical) assert isinstance(rk, Categorical) # Cast rk to encoding so we can compare codes with lk rk = lk._encode_with_my_categories(rk) lk = ensure_int64(lk.codes) rk = ensure_int64(rk.codes) elif isinstance(lk, ExtensionArray) and is_dtype_equal(lk.dtype, rk.dtype): if not isinstance(lk, BaseMaskedArray): lk, _ = lk._values_for_factorize() # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute # "_values_for_factorize" rk, _ = rk._values_for_factorize() # type: ignore[union-attr] klass, lk, rk = _convert_arrays_and_get_rizer_klass(lk, rk) rizer = klass(max(len(lk), len(rk))) if isinstance(lk, BaseMaskedArray): assert isinstance(rk, BaseMaskedArray) llab = rizer.factorize(lk._data, mask=lk._mask) rlab = rizer.factorize(rk._data, mask=rk._mask) else: # Argument 1 to "factorize" of "ObjectFactorizer" has incompatible type # "Union[ndarray[Any, dtype[signedinteger[_64Bit]]], # ndarray[Any, dtype[object_]]]"; expected "ndarray[Any, dtype[object_]]" llab = rizer.factorize(lk) # type: ignore[arg-type] rlab = rizer.factorize(rk) # type: ignore[arg-type] assert llab.dtype == np.dtype(np.intp), llab.dtype assert rlab.dtype == np.dtype(np.intp), rlab.dtype count = rizer.get_count() if sort: uniques = rizer.uniques.to_array() llab, rlab = _sort_labels(uniques, llab, rlab) # NA group lmask = llab == -1 lany = lmask.any() rmask = rlab == -1 rany = rmask.any() if lany or rany: if lany: np.putmask(llab, lmask, count) if rany: np.putmask(rlab, rmask, count) count += 1 if how == "right": return rlab, llab, count return llab, rlab, count def _get_join_keys( llab: list[npt.NDArray[np.int64 | np.intp]], rlab: list[npt.NDArray[np.int64 | np.intp]], shape: Shape, sort: bool, ) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: # how many levels can be done without overflow nlev = next( lev for lev in range(len(shape), 0, -1) if not is_int64_overflow_possible(shape[:lev]) ) # get keys for the first `nlev` levels stride = np.prod(shape[1:nlev], dtype="i8") lkey = stride * llab[0].astype("i8", subok=False, copy=False) rkey = stride * rlab[0].astype("i8", subok=False, copy=False) for i 
in range(1, nlev): with np.errstate(divide="ignore"): stride //= shape[i] lkey += llab[i] * stride rkey += rlab[i] * stride if nlev == len(shape): # all done! return lkey, rkey # densify current keys to avoid overflow lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort) llab = [lkey] + llab[nlev:] rlab = [rkey] + rlab[nlev:] shape = (count,) + shape[nlev:] return _get_join_keys(llab, rlab, shape, sort) Literal: _SpecialForm = ... MergeHow = Literal["left", "right", "inner", "outer", "cross"] The provided code snippet includes necessary dependencies for implementing the `get_join_indexers` function. Write a Python function `def get_join_indexers( left_keys, right_keys, sort: bool = False, how: MergeHow | Literal["asof"] = "inner", **kwargs, ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]` to solve the following problem: Parameters ---------- left_keys : ndarray, Index, Series right_keys : ndarray, Index, Series sort : bool, default False how : {'inner', 'outer', 'left', 'right'}, default 'inner' Returns ------- np.ndarray[np.intp] Indexer into the left_keys. np.ndarray[np.intp] Indexer into the right_keys. Here is the function: def get_join_indexers( left_keys, right_keys, sort: bool = False, how: MergeHow | Literal["asof"] = "inner", **kwargs, ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Parameters ---------- left_keys : ndarray, Index, Series right_keys : ndarray, Index, Series sort : bool, default False how : {'inner', 'outer', 'left', 'right'}, default 'inner' Returns ------- np.ndarray[np.intp] Indexer into the left_keys. np.ndarray[np.intp] Indexer into the right_keys. """ assert len(left_keys) == len( right_keys ), "left_key and right_keys must be the same length" # fast-path for empty left/right left_n = len(left_keys[0]) right_n = len(right_keys[0]) if left_n == 0: if how in ["left", "inner", "cross"]: return _get_empty_indexer() elif not sort and how in ["right", "outer"]: return _get_no_sort_one_missing_indexer(right_n, True) elif right_n == 0: if how in ["right", "inner", "cross"]: return _get_empty_indexer() elif not sort and how in ["left", "outer"]: return _get_no_sort_one_missing_indexer(left_n, False) # get left & right join labels and num. of levels at each location mapped = ( _factorize_keys(left_keys[n], right_keys[n], sort=sort, how=how) for n in range(len(left_keys)) ) zipped = zip(*mapped) llab, rlab, shape = (list(x) for x in zipped) # get flat i8 keys from label lists lkey, rkey = _get_join_keys(llab, rlab, tuple(shape), sort) # factorize keys to a dense i8 space # `count` is the num. of unique keys # set(lkey) | set(rkey) == range(count) lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort, how=how) # preserve left frame order if how == 'left' and sort == False kwargs = cp.copy(kwargs) if how in ("left", "right"): kwargs["sort"] = sort join_func = { "inner": libjoin.inner_join, "left": libjoin.left_outer_join, "right": lambda x, y, count, **kwargs: libjoin.left_outer_join( y, x, count, **kwargs )[::-1], "outer": libjoin.full_outer_join, }[how] # error: Cannot call function of unknown type return join_func(lkey, rkey, count, **kwargs) # type: ignore[operator]
Parameters ---------- left_keys : ndarray, Index, Series right_keys : ndarray, Index, Series sort : bool, default False how : {'inner', 'outer', 'left', 'right'}, default 'inner' Returns ------- np.ndarray[np.intp] Indexer into the left_keys. np.ndarray[np.intp] Indexer into the right_keys.
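As a hedged sketch of how this helper is driven, the call below passes single-level keys directly. Note that pandas.core.reshape.merge is an internal module, not a stable public API, and the exact return shape may differ between pandas versions; the key arrays are invented for illustration.

import numpy as np
from pandas.core.reshape.merge import get_join_indexers

# Each side is a list of key arrays, one entry per join level.
left_keys = [np.array(["a", "b", "b", "d"], dtype=object)]
right_keys = [np.array(["b", "d", "e"], dtype=object)]

# For an inner join only keys present on both sides ("b" and "d") produce rows;
# the returned arrays are positional indexers into the left and right keys.
left_idx, right_idx = get_join_indexers(left_keys, right_keys, sort=False, how="inner")
print(left_idx, right_idx)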
173,133
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... ) The provided code snippet includes necessary dependencies for implementing the `restore_dropped_levels_multijoin` function. Write a Python function `def restore_dropped_levels_multijoin( left: MultiIndex, right: MultiIndex, dropped_level_names, join_index: Index, lindexer: npt.NDArray[np.intp], rindexer: npt.NDArray[np.intp], ) -> tuple[list[Index], npt.NDArray[np.intp], list[Hashable]]` to solve the following problem: *this is an internal non-public method* Returns the levels, labels and names of a multi-index to multi-index join. Depending on the type of join, this method restores the appropriate dropped levels of the joined multi-index. 
The method relies on lindexer, rindexer which hold the index positions of left and right, where a join was feasible Parameters ---------- left : MultiIndex left index right : MultiIndex right index dropped_level_names : str array list of non-common level names join_index : Index the index of the join between the common levels of left and right lindexer : np.ndarray[np.intp] left indexer rindexer : np.ndarray[np.intp] right indexer Returns ------- levels : list of Index levels of combined multiindexes labels : np.ndarray[np.intp] labels of combined multiindexes names : List[Hashable] names of combined multiindex levels Here is the function: def restore_dropped_levels_multijoin( left: MultiIndex, right: MultiIndex, dropped_level_names, join_index: Index, lindexer: npt.NDArray[np.intp], rindexer: npt.NDArray[np.intp], ) -> tuple[list[Index], npt.NDArray[np.intp], list[Hashable]]: """ *this is an internal non-public method* Returns the levels, labels and names of a multi-index to multi-index join. Depending on the type of join, this method restores the appropriate dropped levels of the joined multi-index. The method relies on lindexer, rindexer which hold the index positions of left and right, where a join was feasible Parameters ---------- left : MultiIndex left index right : MultiIndex right index dropped_level_names : str array list of non-common level names join_index : Index the index of the join between the common levels of left and right lindexer : np.ndarray[np.intp] left indexer rindexer : np.ndarray[np.intp] right indexer Returns ------- levels : list of Index levels of combined multiindexes labels : np.ndarray[np.intp] labels of combined multiindexes names : List[Hashable] names of combined multiindex levels """ def _convert_to_multiindex(index: Index) -> MultiIndex: if isinstance(index, MultiIndex): return index else: return MultiIndex.from_arrays([index._values], names=[index.name]) # For multi-multi joins with one overlapping level, # the returned index if of type Index # Assure that join_index is of type MultiIndex # so that dropped levels can be appended join_index = _convert_to_multiindex(join_index) join_levels = join_index.levels join_codes = join_index.codes join_names = join_index.names # Iterate through the levels that must be restored for dropped_level_name in dropped_level_names: if dropped_level_name in left.names: idx = left indexer = lindexer else: idx = right indexer = rindexer # The index of the level name to be restored name_idx = idx.names.index(dropped_level_name) restore_levels = idx.levels[name_idx] # Inject -1 in the codes list where a join was not possible # IOW indexer[i]=-1 codes = idx.codes[name_idx] if indexer is None: restore_codes = codes else: restore_codes = algos.take_nd(codes, indexer, fill_value=-1) # error: Cannot determine type of "__add__" join_levels = join_levels + [restore_levels] # type: ignore[has-type] join_codes = join_codes + [restore_codes] join_names = join_names + [dropped_level_name] return join_levels, join_codes, join_names
*this is an internal non-public method* Returns the levels, labels and names of a multi-index to multi-index join. Depending on the type of join, this method restores the appropriate dropped levels of the joined multi-index. The method relies on lindexer, rindexer which hold the index positions of left and right, where a join was feasible Parameters ---------- left : MultiIndex left index right : MultiIndex right index dropped_level_names : str array list of non-common level names join_index : Index the index of the join between the common levels of left and right lindexer : np.ndarray[np.intp] left indexer rindexer : np.ndarray[np.intp] right indexer Returns ------- levels : list of Index levels of combined multiindexes labels : np.ndarray[np.intp] labels of combined multiindexes names : List[Hashable] names of combined multiindex levels
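Because calling this internal helper directly requires pre-computed join indexers, the hedged sketch below instead exercises the public behavior it supports: joining two MultiIndexed frames on their shared level keeps the non-common levels in the result. The frames and level names are invented for illustration.

import pandas as pd

left = pd.DataFrame(
    {"v1": [10, 20, 30]},
    index=pd.MultiIndex.from_tuples(
        [("a", 1), ("b", 2), ("c", 3)], names=["key", "lnum"]
    ),
)
right = pd.DataFrame(
    {"v2": [100, 200]},
    index=pd.MultiIndex.from_tuples([("a", 9), ("b", 8)], names=["key", "rnum"]),
)

# The join happens on the shared "key" level; the dropped "lnum" and "rnum"
# levels are restored onto the joined index rather than discarded.
print(left.join(right, how="inner"))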
173,134
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible def _asof_by_function(direction: str): name = f"asof_join_{direction}_on_X_by_Y" return getattr(libjoin, name, None)
null
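A hedged sketch of what the lookup resolves to: the helper only formats a name and fetches it from the compiled join module, so an unrecognized direction simply falls back to None. The import path is internal and the "sideways" direction is a deliberately invalid example.

from pandas.core.reshape.merge import _asof_by_function

# "backward", "forward" and "nearest" resolve to compiled
# asof_join_<direction>_on_X_by_Y routines in pandas._libs.join.
print(_asof_by_function("backward"))
print(_asof_by_function("sideways"))  # None: no such compiled helper exists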
173,135
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible DtypeObj = Union[np.dtype, "ExtensionDtype"] def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. 
Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) The provided code snippet includes necessary dependencies for implementing the `_get_cython_type_upcast` function. Write a Python function `def _get_cython_type_upcast(dtype: DtypeObj) -> str` to solve the following problem: Upcast a dtype to 'int64_t', 'double', or 'object Here is the function: def _get_cython_type_upcast(dtype: DtypeObj) -> str: """Upcast a dtype to 'int64_t', 'double', or 'object'""" if is_integer_dtype(dtype): return "int64_t" elif is_float_dtype(dtype): return "double" else: return "object"
Upcast a dtype to 'int64_t', 'double', or 'object'
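A hedged restatement using only the public dtype checks, so it can run without importing merge internals; the name upcast_for_cython is hypothetical and simply mirrors the helper above.

import numpy as np
import pandas as pd
from pandas.api.types import is_float_dtype, is_integer_dtype

def upcast_for_cython(dtype) -> str:
    # Integer dtypes (including nullable ones such as Int8) widen to int64_t,
    # float dtypes to double, and everything else falls back to object.
    if is_integer_dtype(dtype):
        return "int64_t"
    elif is_float_dtype(dtype):
        return "double"
    return "object"

print(upcast_for_cython(np.dtype("int32")))           # int64_t
print(upcast_for_cython(pd.Int8Dtype()))              # int64_t
print(upcast_for_cython(np.dtype("float32")))         # double
print(upcast_for_cython(np.dtype("datetime64[ns]")))  # object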
173,136
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible def _get_multiindex_indexer( join_keys, index: MultiIndex, sort: bool ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: def _get_single_indexer( join_key, index: Index, sort: bool = False ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: def _left_join_on_index( left_ax: Index, right_ax: Index, join_keys, sort: bool = False ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp]]: if len(join_keys) > 1: if not ( isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels ): raise AssertionError( "If more than one join key is given then " "'right_ax' must be a MultiIndex and the " "number of join keys must be the number of levels in right_ax" ) left_indexer, right_indexer = _get_multiindex_indexer( join_keys, right_ax, sort=sort ) else: jkey = join_keys[0] left_indexer, right_indexer = _get_single_indexer(jkey, right_ax, sort=sort) if sort or len(left_ax) != len(left_indexer): # if asked to sort or there are 1-to-many matches join_index = left_ax.take(left_indexer) return join_index, left_indexer, right_indexer # left frame preserves order & length of its index return left_ax, None, right_indexer
null
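A hedged sketch of the public code path that reaches these index-join helpers: merging a column key against the other frame's index with how="left". The frames are invented for illustration.

import pandas as pd

left = pd.DataFrame({"key": ["a", "b", "b", "d"], "lval": [1, 2, 3, 4]})
right = pd.DataFrame({"rval": [10, 20, 30]}, index=["a", "b", "c"])

# left_on + right_index joins the "key" column against right's index; every
# left row is kept, and the unmatched key "d" gets NaN for rval.
print(pd.merge(left, right, left_on="key", right_index=True, how="left"))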
173,137
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible def _should_fill(lname, rname) -> bool: if not isinstance(lname, str) or not isinstance(rname, str): return True return lname == rname
null
173,138
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible def _any(x) -> bool: return x is not None and com.any_not_none(*x)
null
173,139
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) def _validate_operand(obj: DataFrame | Series) -> DataFrame: if isinstance(obj, ABCDataFrame): return obj elif isinstance(obj, ABCSeries): if obj.name is None: raise ValueError("Cannot merge a Series without a name") return obj.to_frame() else: raise TypeError( f"Can only merge Series or DataFrame objects, a {type(obj)} was passed" )
null
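A hedged sketch of the rule this validation enforces, seen through the public pd.merge API: a Series operand is converted to a one-column DataFrame, which requires it to be named. The variable names are illustrative.

import pandas as pd

named = pd.Series([1, 2, 3], name="key")
unnamed = pd.Series([1, 2, 3])
frame = pd.DataFrame({"key": [1, 2, 3], "val": ["a", "b", "c"]})

# A named Series becomes the single column "key" and merges normally.
print(pd.merge(named, frame, on="key"))

# An unnamed Series cannot be turned into a column, so validation fails early.
try:
    pd.merge(unnamed, frame, left_index=True, right_index=True)
except ValueError as err:
    print(err)  # Cannot merge a Series without a name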
173,140
from __future__ import annotations import copy as cp import datetime from functools import partial import string from typing import ( TYPE_CHECKING, Hashable, Literal, Sequence, cast, ) import uuid import warnings import numpy as np from pandas._libs import ( Timedelta, hashtable as libhashtable, join as libjoin, lib, ) from pandas._libs.lib import is_range_indexer from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt, ) from pandas.errors import MergeError from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas import ( Categorical, Index, MultiIndex, Series, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( BaseMaskedArray, ExtensionArray, ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible class partial(Generic[_T]): func: Callable[..., _T] args: Tuple[Any, ...] keywords: Dict[str, Any] def __init__(self, func: Callable[..., _T], *args: Any, **kwargs: Any) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> _T: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... Suffixes = Tuple[Optional[str], Optional[str]] class MergeError(ValueError): """ Exception raised when merging data. Subclass of ``ValueError``. """ The provided code snippet includes necessary dependencies for implementing the `_items_overlap_with_suffix` function. Write a Python function `def _items_overlap_with_suffix( left: Index, right: Index, suffixes: Suffixes ) -> tuple[Index, Index]` to solve the following problem: Suffixes type validation. If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. Here is the function: def _items_overlap_with_suffix( left: Index, right: Index, suffixes: Suffixes ) -> tuple[Index, Index]: """ Suffixes type validation. If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ if not is_list_like(suffixes, allow_sets=False) or isinstance(suffixes, dict): raise TypeError( f"Passing 'suffixes' as a {type(suffixes)}, is not supported. " "Provide 'suffixes' as a tuple instead." ) to_rename = left.intersection(right) if len(to_rename) == 0: return left, right lsuffix, rsuffix = suffixes if not lsuffix and not rsuffix: raise ValueError(f"columns overlap but no suffix specified: {to_rename}") def renamer(x, suffix): """ Rename the left and right indices. 
If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ if x in to_rename and suffix is not None: return f"{x}{suffix}" return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) llabels = left._transform_index(lrenamer) rlabels = right._transform_index(rrenamer) dups = [] if not llabels.is_unique: # Only warn when duplicates are caused because of suffixes, already duplicated # columns in origin should not warn dups = llabels[(llabels.duplicated()) & (~left.duplicated())].tolist() if not rlabels.is_unique: dups.extend(rlabels[(rlabels.duplicated()) & (~right.duplicated())].tolist()) if dups: raise MergeError( f"Passing 'suffixes' which cause duplicate columns {set(dups)} is " f"not allowed.", ) return llabels, rlabels
Suffixes type validation. If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string.
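A hedged sketch of this validation as seen from the public merge API; the column names are invented. Overlapping non-key columns receive the suffixes, and passing two empty suffixes raises because the overlap cannot be resolved.

import pandas as pd

left = pd.DataFrame({"key": [1, 2], "val": [10, 20]})
right = pd.DataFrame({"key": [1, 2], "val": [30, 40]})

# The shared join column "key" stays untouched; the overlapping "val" columns
# are disambiguated with the given suffixes.
print(pd.merge(left, right, on="key", suffixes=("_l", "_r")))

# With no usable suffix on either side the overlap cannot be resolved.
try:
    pd.merge(left, right, on="key", suffixes=(None, None))
except ValueError as err:
    print(err)  # columns overlap but no suffix specified: Index(['val'], ...)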
173,141
from __future__ import annotations from typing import ( TYPE_CHECKING, Callable, Hashable, Sequence, cast, ) import numpy as np from pandas._libs import lib from pandas._typing import ( AggFuncType, AggFuncTypeBase, AggFuncTypeDict, IndexLabel, ) from pandas.util._decorators import ( Appender, Substitution, ) from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.common import ( is_extension_array_dtype, is_integer_dtype, is_list_like, is_nested_list_like, is_scalar, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) import pandas.core.common as com from pandas.core.frame import _shared_docs from pandas.core.groupby import Grouper from pandas.core.indexes.api import ( Index, MultiIndex, get_objs_combined_axis, ) from pandas.core.reshape.concat import concat from pandas.core.reshape.util import cartesian_product from pandas.core.series import Series class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... IndexLabel = Union[Hashable, Sequence[Hashable]] class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. 
Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
                dtype = np.dtype(object)

        if index is None:
            if not is_list_like(data):
                data = [data]
            index = default_index(len(data))
        elif is_list_like(data):
            com.require_length_match(data, index)

        # create/copy the manager
        if isinstance(data, (SingleBlockManager, SingleArrayManager)):
            if dtype is not None:
                data = data.astype(dtype=dtype, errors="ignore", copy=copy)
            elif copy:
                data = data.copy()
        else:
            data = sanitize_array(data, index, dtype, copy)

            manager = get_option("mode.data_manager")
            if manager == "block":
                data = SingleBlockManager.from_array(data, index, refs=refs)
            elif manager == "array":
                data = SingleArrayManager.from_array(data, index)

        NDFrame.__init__(self, data)
        self.name = name
        self._set_axis(0, index)

    def _init_dict(
        self, data, index: Index | None = None, dtype: DtypeObj | None = None
    ):
        """
        Derive the "_mgr" and "index" attributes of a new Series from a
        dictionary input.

        Parameters
        ----------
        data : dict or dict-like
            Data used to populate the new Series.
        index : Index or None, default None
            Index for the new Series: if None, use dict keys.
        dtype : np.dtype, ExtensionDtype, or None, default None
            The dtype for the new Series: if None, infer from data.

        Returns
        -------
        _data : BlockManager for the new Series
        index : index for the new Series
        """
        keys: Index | tuple

        # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
        # raises KeyError), so we iterate the entire dict, and align
        if data:
            # GH#34717: extracting the keys and values with zip, or going
            # through generators, hurt performance, so pull them out eagerly.
            keys = tuple(data.keys())
            values = list(data.values())  # materializing the values as a list
        elif index is not None:
            # fastpath for Series(data=None). Just use broadcasting a scalar
            # instead of reindexing.
            if len(index) or dtype is not None:
                values = na_value_for_dtype(pandas_dtype(dtype), compat=False)
            else:
                values = []
            keys = index
        else:
            keys, values = (), []

        # Input is now list-like, so rely on "standard" construction:
        s = self._constructor(
            values,
            index=keys,
            dtype=dtype,
        )

        # Now we just make sure the order is respected, if any
        if data and index is not None:
            s = s.reindex(index, copy=False)

        return s._mgr, s.index

    # ----------------------------------------------------------------------

    @property
    def _constructor(self) -> Callable[..., Series]:
        return Series

    @property
    def _constructor_expanddim(self) -> Callable[..., DataFrame]:
        """
        Used when a manipulation result has one dimension higher than the
        original, such as Series.to_frame()
        """
        from pandas.core.frame import DataFrame

        return DataFrame

    # types
    @property
    def _can_hold_na(self) -> bool:
        return self._mgr._can_hold_na

    # ndarray compatibility
    @property
    def dtype(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.dtype
        dtype('int64')
        """
        return self._mgr.dtype

    @property
    def dtypes(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.dtypes
        dtype('int64')
        """
        # DataFrame compatibility
        return self.dtype

    @property
    def name(self) -> Hashable:
        """
        Return the name of the Series.

        The name of a Series becomes its index or column name if it is used
        to form a DataFrame. It is also used whenever displaying the Series
        using the interpreter.

        Returns
        -------
        label (hashable object)
            The name of the Series, also the column name if part of a DataFrame.

        See Also
        --------
        Series.rename : Sets the Series name when given a scalar input.
        Index.name : Corresponding Index property.

        Examples
        --------
        The Series name can be set initially when calling the constructor.

        >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')
        >>> s
        0    1
        1    2
        2    3
        Name: Numbers, dtype: int64
        >>> s.name = "Integers"
        >>> s
        0    1
        1    2
        2    3
        Name: Integers, dtype: int64

        The name of a Series within a DataFrame is its column name.

        >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
        ...                   columns=["Odd Numbers", "Even Numbers"])
        >>> df
           Odd Numbers  Even Numbers
        0            1             2
        1            3             4
        2            5             6
        >>> df["Even Numbers"].name
        'Even Numbers'
        """
        return self._name

    @name.setter
    def name(self, value: Hashable) -> None:
        validate_all_hashable(value, error_name=f"{type(self).__name__}.name")
        object.__setattr__(self, "_name", value)

    @property
    def values(self):
        """
        Return Series as ndarray or ndarray-like depending on the dtype.

        .. warning::

           We recommend using :attr:`Series.array` or
           :meth:`Series.to_numpy`, depending on whether you need
           a reference to the underlying data or a NumPy array.

        Returns
        -------
        numpy.ndarray or ndarray-like

        See Also
        --------
        Series.array : Reference to the underlying data.
        Series.to_numpy : A NumPy array representing the underlying data.

        Examples
        --------
        >>> pd.Series([1, 2, 3]).values
        array([1, 2, 3])

        >>> pd.Series(list('aabc')).values
        array(['a', 'a', 'b', 'c'], dtype=object)

        >>> pd.Series(list('aabc')).astype('category').values
        ['a', 'a', 'b', 'c']
        Categories (3, object): ['a', 'b', 'c']

        Timezone aware datetime data is converted to UTC:

        >>> pd.Series(pd.date_range('20130101', periods=3,
        ...                         tz='US/Eastern')).values
        array(['2013-01-01T05:00:00.000000000',
               '2013-01-02T05:00:00.000000000',
               '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
        """
        return self._mgr.external_values()

    @property
    def _values(self):
        """
        Return the internal repr of this data (defined by Block.internal_values()).

        These are the values as stored in the Block (ndarray or ExtensionArray
        depending on the Block class), with datetime64[ns] and timedelta64[ns]
        wrapped in ExtensionArrays to match Index._values behavior.

        Differs from the public ``.values`` for certain data types, because of
        historical backwards compatibility of the public attribute (e.g. period
        returns object ndarray and datetimetz a datetime64[ns] ndarray for
        ``.values`` while it returns an ExtensionArray for ``._values`` in those
        cases).

        Differs from ``.array`` in that this still returns the numpy array if
        the Block is backed by a numpy array (except for datetime64 and
        timedelta64 dtypes), while ``.array`` ensures to always return an
        ExtensionArray.

        Overview:

        dtype       | values        | _values       | array         |
        ----------- | ------------- | ------------- | ------------- |
        Numeric     | ndarray       | ndarray       | PandasArray   |
        Category    | Categorical   | Categorical   | Categorical   |
        dt64[ns]    | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        td64[ns]    | ndarray[m8ns] | TimedeltaArray| TimedeltaArray|
        Period      | ndarray[obj]  | PeriodArray   | PeriodArray   |
        Nullable    | EA            | EA            | EA            |

        """
        return self._mgr.internal_values()

    @property
    def _references(self) -> BlockValuesRefs | None:
        if isinstance(self._mgr, SingleArrayManager):
            return None
        return self._mgr._block.refs

    # error: Decorated property not supported
    @property  # type: ignore[misc]
    def array(self) -> ExtensionArray:
        return self._mgr.array_values()

    # ops
    def ravel(self, order: str = "C") -> ArrayLike:
        """
        Return the flattened underlying data as an ndarray or ExtensionArray.

        Returns
        -------
        numpy.ndarray or ExtensionArray
            Flattened data of the Series.

        See Also
        --------
        numpy.ndarray.ravel : Return a flattened array.
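
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.ravel()
        array([1, 2, 3])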
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
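        # e.g. ser[["a", "b"]] = value ends up here, after the engine lookup
        # in __setitem__ raised InvalidIndexError for the list key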
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
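
        Under copy-on-write this is a no-op: no cacher reference is stored.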
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
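
        Examples
        --------
        >>> s = pd.Series([2, 4, 2, 2, 4, None])
        >>> s.mode()
        0    2.0
        dtype: float64

        With ``dropna=False``, missing values are counted like any other value:

        >>> s = pd.Series([2, 4, None, None, 4, None])
        >>> s.mode(dropna=False)
        0   NaN
        dtype: float64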
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
        Examples
        --------
        {examples}
        """
        result = algorithms.diff(self._values, periods)
        return self._constructor(result, index=self.index, copy=False).__finalize__(
            self, method="diff"
        )

    def autocorr(self, lag: int = 1) -> float:
        """
        Compute the lag-N autocorrelation.

        This method computes the Pearson correlation between
        the Series and its shifted self.

        Parameters
        ----------
        lag : int, default 1
            Number of lags to apply before performing autocorrelation.

        Returns
        -------
        float
            The Pearson correlation between self and self.shift(lag).

        See Also
        --------
        Series.corr : Compute the correlation between two Series.
        Series.shift : Shift index by desired number of periods.
        DataFrame.corr : Compute pairwise correlation of columns.
        DataFrame.corrwith : Compute pairwise correlation between rows or
            columns of two DataFrame objects.

        Notes
        -----
        If the Pearson correlation is not well defined, 'NaN' is returned.

        Examples
        --------
        >>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
        >>> s.autocorr()  # doctest: +ELLIPSIS
        0.10355...
        >>> s.autocorr(lag=2)  # doctest: +ELLIPSIS
        -0.99999...

        If the Pearson correlation is not well defined, then 'NaN' is returned.

        >>> s = pd.Series([1, 0, 0, 0])
        >>> s.autocorr()
        nan
        """
        return self.corr(self.shift(lag))

    def dot(self, other: AnyArrayLike) -> Series | np.ndarray:
        """
        Compute the dot product between the Series and the columns of other.

        This method computes the dot product between the Series and another
        one, or the Series and each column of a DataFrame, or the Series and
        each column of an array.

        It can also be called using ``self @ other`` in Python >= 3.5.

        Parameters
        ----------
        other : Series, DataFrame or array-like
            The other object to compute the dot product with its columns.

        Returns
        -------
        scalar, Series or numpy.ndarray
            The dot product of the Series and other if other is a Series;
            a Series of the dot products between the Series and each column
            of other if other is a DataFrame; or a numpy.ndarray of the dot
            products between the Series and each column if other is a numpy
            array.

        See Also
        --------
        DataFrame.dot: Compute the matrix product with the DataFrame.
        Series.mul: Multiplication of series and other, element-wise.

        Notes
        -----
        The Series and other have to share the same index if other is a Series
        or a DataFrame.

        Examples
        --------
        >>> s = pd.Series([0, 1, 2, 3])
        >>> other = pd.Series([-1, 2, -3, 4])
        >>> s.dot(other)
        8
        >>> s @ other
        8
        >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
        >>> s.dot(df)
        0    24
        1    14
        dtype: int64
        >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
        >>> s.dot(arr)
        array([24, 14])
        """
        if isinstance(other, (Series, ABCDataFrame)):
            common = self.index.union(other.index)
            if len(common) > len(self.index) or len(common) > len(other.index):
                raise ValueError("matrices are not aligned")

            left = self.reindex(index=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right.values
        else:
            lvals = self.values
            rvals = np.asarray(other)
            if lvals.shape[0] != rvals.shape[0]:
                raise Exception(
                    f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
                )

        if isinstance(other, ABCDataFrame):
            return self._constructor(
                np.dot(lvals, rvals), index=other.columns, copy=False
            ).__finalize__(self, method="dot")
        elif isinstance(other, Series):
            return np.dot(lvals, rvals)
        elif isinstance(rvals, np.ndarray):
            return np.dot(lvals, rvals)
        else:  # pragma: no cover
            raise TypeError(f"unsupported type: {type(other)}")

    def __matmul__(self, other):
        """
        Matrix multiplication using binary ``@`` operator in Python>=3.5.
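
        Examples
        --------
        >>> s = pd.Series([0, 1, 2, 3])
        >>> s @ pd.Series([-1, 2, -3, 4])
        8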
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
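A hypothetical sketch of the -1 placeholder at NaN positions (the result
dtype is the platform intp, typically int64):

>>> pd.Series([3.0, np.nan, 1.0]).argsort()
0    1
1   -1
2    0
dtype: int64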
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
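# A minimal sketch of the ``aggregate`` path above, with illustrative names
# (assumes pandas is imported as ``pd``): when ``func`` is None, keyword
# arguments are collected via ``func = dict(kwargs.items())``, so named
# aggregation and an explicit dict take the same route:
#
# >>> s = pd.Series([1, 2, 3, 4])
# >>> s.agg(minimum="min", maximum="max")
# minimum    1
# maximum    4
# dtype: int64
# >>> s.agg({"minimum": "min", "maximum": "max"})
# minimum    1
# maximum    4
# dtype: int64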
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series def pivot( data: DataFrame, *, columns: IndexLabel, index: IndexLabel | lib.NoDefault = lib.NoDefault, values: IndexLabel | lib.NoDefault = lib.NoDefault, ) -> DataFrame: columns_listlike = com.convert_to_list_like(columns) # If columns is None we will create a MultiIndex level with None as name # which might cause duplicated names because None is the default for # level names data.index.names = [ name if name is not None else lib.NoDefault for name in data.index.names ] indexed: DataFrame | Series if values is lib.NoDefault: if index is not lib.NoDefault: cols = com.convert_to_list_like(index) else: cols = [] append = index is lib.NoDefault # error: Unsupported 
operand types for + ("List[Any]" and "ExtensionArray") # error: Unsupported left operand type for + ("ExtensionArray") indexed = data.set_index( cols + columns_listlike, append=append # type: ignore[operator] ) else: if index is lib.NoDefault: if isinstance(data.index, MultiIndex): # GH 23955 index_list = [ data.index.get_level_values(i) for i in range(data.index.nlevels) ] else: index_list = [Series(data.index, name=data.index.name)] else: index_list = [data[idx] for idx in com.convert_to_list_like(index)] data_columns = [data[col] for col in columns_listlike] index_list.extend(data_columns) multiindex = MultiIndex.from_arrays(index_list) if is_list_like(values) and not isinstance(values, tuple): # Exclude tuple because it is seen as a single column name values = cast(Sequence[Hashable], values) indexed = data._constructor( data[values]._values, index=multiindex, columns=values ) else: indexed = data._constructor_sliced(data[values]._values, index=multiindex) # error: Argument 1 to "unstack" of "DataFrame" has incompatible type "Union # [List[Any], ExtensionArray, ndarray[Any, Any], Index, Series]"; expected # "Hashable" result = indexed.unstack(columns_listlike) # type: ignore[arg-type] result.index.names = [ name if name is not lib.NoDefault else None for name in result.index.names ] return result
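# A minimal usage sketch of the pivot() helper above; the frame is
# illustrative and assumes pandas is importable. With ``values`` given, the
# index/columns arrays are assembled into a MultiIndex and then unstacked:
import pandas as pd

df = pd.DataFrame(
    {
        "foo": ["one", "one", "two", "two"],
        "bar": ["A", "B", "A", "B"],
        "baz": [1, 2, 3, 4],
    }
)
print(df.pivot(index="foo", columns="bar", values="baz"))
# bar  A  B
# foo
# one  1  2
# two  3  4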
null
173,142
from __future__ import annotations from typing import ( TYPE_CHECKING, Callable, Hashable, Sequence, cast, ) import numpy as np from pandas._libs import lib from pandas._typing import ( AggFuncType, AggFuncTypeBase, AggFuncTypeDict, IndexLabel, ) from pandas.util._decorators import ( Appender, Substitution, ) from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.common import ( is_extension_array_dtype, is_integer_dtype, is_list_like, is_nested_list_like, is_scalar, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) import pandas.core.common as com from pandas.core.frame import _shared_docs from pandas.core.groupby import Grouper from pandas.core.indexes.api import ( Index, MultiIndex, get_objs_combined_axis, ) from pandas.core.reshape.concat import concat from pandas.core.reshape.util import cartesian_product from pandas.core.series import Series def pivot_table( data: DataFrame, values=None, index=None, columns=None, aggfunc: AggFuncType = "mean", fill_value=None, margins: bool = False, dropna: bool = True, margins_name: Hashable = "All", observed: bool = False, sort: bool = True, ) -> DataFrame: index = _convert_by(index) columns = _convert_by(columns) if isinstance(aggfunc, list): pieces: list[DataFrame] = [] keys = [] for func in aggfunc: _table = __internal_pivot_table( data, values=values, index=index, columns=columns, fill_value=fill_value, aggfunc=func, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, sort=sort, ) pieces.append(_table) keys.append(getattr(func, "__name__", func)) table = concat(pieces, keys=keys, axis=1) return table.__finalize__(data, method="pivot_table") table = __internal_pivot_table( data, values, index, columns, aggfunc, fill_value, margins, dropna, margins_name, observed, sort, ) return table.__finalize__(data, method="pivot_table") def _normalize( table: DataFrame, normalize, margins: bool, margins_name: Hashable = "All" ) -> DataFrame: if not isinstance(normalize, (bool, str)): axis_subs = {0: "index", 1: "columns"} try: normalize = axis_subs[normalize] except KeyError as err: raise ValueError("Not a valid normalize argument") from err if margins is False: # Actual Normalizations normalizers: dict[bool | str, Callable] = { "all": lambda x: x / x.sum(axis=1).sum(axis=0), "columns": lambda x: x / x.sum(), "index": lambda x: x.div(x.sum(axis=1), axis=0), } normalizers[True] = normalizers["all"] try: f = normalizers[normalize] except KeyError as err: raise ValueError("Not a valid normalize argument") from err table = f(table) table = table.fillna(0) elif margins is True: # keep index and column of pivoted table table_index = table.index table_columns = table.columns last_ind_or_col = table.iloc[-1, :].name # check if margin name is not in (for MI cases) and not equal to last # index/column and save the column and index margin if (margins_name not in last_ind_or_col) & (margins_name != last_ind_or_col): raise ValueError(f"{margins_name} not in pivoted DataFrame") column_margin = table.iloc[:-1, -1] index_margin = table.iloc[-1, :-1] # keep the core table table = table.iloc[:-1, :-1] # Normalize core table = _normalize(table, normalize=normalize, margins=False) # Fix Margins if normalize == "columns": column_margin = column_margin / column_margin.sum() table = concat([table, column_margin], axis=1) table = table.fillna(0) table.columns = table_columns elif normalize == "index": index_margin = index_margin / index_margin.sum() table = table._append(index_margin) table = table.fillna(0) 
table.index = table_index elif normalize == "all" or normalize is True: column_margin = column_margin / column_margin.sum() index_margin = index_margin / index_margin.sum() index_margin.loc[margins_name] = 1 table = concat([table, column_margin], axis=1) table = table._append(index_margin) table = table.fillna(0) table.index = table_index table.columns = table_columns else: raise ValueError("Not a valid normalize argument") else: raise ValueError("Not a valid margins argument") return table def _get_names(arrs, names, prefix: str = "row"): if names is None: names = [] for i, arr in enumerate(arrs): if isinstance(arr, ABCSeries) and arr.name is not None: names.append(arr.name) else: names.append(f"{prefix}_{i}") else: if len(names) != len(arrs): raise AssertionError("arrays and names must have the same length") if not isinstance(names, list): names = list(names) return names def _build_names_mapper( rownames: list[str], colnames: list[str] ) -> tuple[dict[str, str], list[str], dict[str, str], list[str]]: """ Given the names of a DataFrame's rows and columns, returns a set of unique row and column names and mappers that convert to original names. A row or column name is replaced if it is duplicate among the rows of the inputs, among the columns of the inputs or between the rows and the columns. Parameters ---------- rownames: list[str] colnames: list[str] Returns ------- Tuple(Dict[str, str], List[str], Dict[str, str], List[str]) rownames_mapper: dict[str, str] a dictionary with new row names as keys and original rownames as values unique_rownames: list[str] a list of rownames with duplicate names replaced by dummy names colnames_mapper: dict[str, str] a dictionary with new column names as keys and original column names as values unique_colnames: list[str] a list of column names with duplicate names replaced by dummy names """ def get_duplicates(names): seen: set = set() return {name for name in names if name in seen or seen.add(name)} shared_names = set(rownames).intersection(set(colnames)) dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names rownames_mapper = { f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names } unique_rownames = [ f"row_{i}" if name in dup_names else name for i, name in enumerate(rownames) ] colnames_mapper = { f"col_{i}": name for i, name in enumerate(colnames) if name in dup_names } unique_colnames = [ f"col_{i}" if name in dup_names else name for i, name in enumerate(colnames) ] return rownames_mapper, unique_rownames, colnames_mapper, unique_colnames class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) def get_objs_combined_axis( objs, intersect: bool = False, axis: Axis = 0, sort: bool = True, copy: bool = False ) -> Index: """ Extract combined index: return intersection or union (depending on the value of "intersect") of indexes on given axis, or None if all objects lack indexes (e.g. they are numpy arrays). Parameters ---------- objs : list Series or DataFrame objects, may be mix of the two. intersect : bool, default False If True, calculate the intersection between indexes.
Otherwise, calculate the union. axis : {0 or 'index', 1 or 'outer'}, default 0 The axis to extract indexes from. sort : bool, default True Whether the result index should come out sorted or not. copy : bool, default False If True, return a copy of the combined index. Returns ------- Index """ obs_idxes = [obj._get_axis(axis) for obj in objs] return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy) The provided code snippet includes necessary dependencies for implementing the `crosstab` function. Write a Python function `def crosstab( index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins: bool = False, margins_name: Hashable = "All", dropna: bool = True, normalize: bool = False, ) -> DataFrame` to solve the following problem: Compute a simple cross tabulation of two (or more) factors. By default, computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Reference :ref:`the user guide <reshaping.crosstabulations>` for more examples. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. 
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0 Here is the function: def crosstab( index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins: bool = False, margins_name: Hashable = "All", dropna: bool = True, normalize: bool = False, ) -> DataFrame: """ Compute a simple cross tabulation of two (or more) factors. By default, computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Reference :ref:`the user guide <reshaping.crosstabulations>` for more examples. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. 
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0 """ if values is None and aggfunc is not None: raise ValueError("aggfunc cannot be used without values.") if values is not None and aggfunc is None: raise ValueError("values cannot be used without an aggfunc.") if not is_nested_list_like(index): index = [index] if not is_nested_list_like(columns): columns = [columns] common_idx = None pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))] if pass_objs: common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False) rownames = _get_names(index, rownames, prefix="row") colnames = _get_names(columns, colnames, prefix="col") # duplicate names mapped to unique names for pivot op ( rownames_mapper, unique_rownames, colnames_mapper, unique_colnames, ) = _build_names_mapper(rownames, colnames) from pandas import DataFrame data = { **dict(zip(unique_rownames, index)), **dict(zip(unique_colnames, columns)), } df = DataFrame(data, index=common_idx) if values is None: df["__dummy__"] = 0 kwargs = {"aggfunc": len, "fill_value": 0} else: df["__dummy__"] = values kwargs = {"aggfunc": aggfunc} # error: Argument 7 to "pivot_table" of "DataFrame" has incompatible type # "**Dict[str, object]"; expected "Union[...]" table = df.pivot_table( "__dummy__", index=unique_rownames, columns=unique_colnames, margins=margins, margins_name=margins_name, dropna=dropna, **kwargs, # type: ignore[arg-type] ) # Post-process if normalize is not False: table = _normalize( table, normalize=normalize, margins=margins, margins_name=margins_name ) table = table.rename_axis(index=rownames_mapper, axis=0) table = table.rename_axis(columns=colnames_mapper, axis=1) return table
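# A minimal sketch of the normalize post-processing above; the inputs are
# illustrative and assume numpy/pandas are importable. _normalize divides
# the frequency table row-wise, column-wise, or by the grand total:
import numpy as np
import pandas as pd

a = np.array(["foo", "foo", "bar"], dtype=object)
b = np.array(["one", "two", "one"], dtype=object)
print(pd.crosstab(a, b, normalize="index"))    # each row sums to 1.0
print(pd.crosstab(a, b, normalize="columns"))  # each column sums to 1.0
print(pd.crosstab(a, b, normalize=True))       # all cells sum to 1.0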
Compute a simple cross tabulation of two (or more) factors. By default, computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Reference :ref:`the user guide <reshaping.crosstabulations>` for more examples. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0
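As the implementation above shows, the default counting path attaches a throwaway ``__dummy__`` column and delegates to ``pivot_table`` with ``aggfunc=len``. A rough stand-alone equivalent, offered only as an illustration of that flow (the column names here are my own):

import pandas as pd

rows = pd.Series(["foo", "foo", "bar", "bar", "foo"], name="row_0")
cols = pd.Series(["one", "two", "one", "two", "one"], name="col_0")

df = pd.DataFrame({"row_0": rows, "col_0": cols})
df["__dummy__"] = 0

counts = df.pivot_table(
    "__dummy__", index="row_0", columns="col_0", aggfunc=len, fill_value=0
)
print(counts)
print(pd.crosstab(rows, cols))  # expected to produce the same table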
173,143
from __future__ import annotations from collections import defaultdict import itertools from typing import ( Hashable, Iterable, ) import numpy as np from pandas._libs.sparse import IntIndex from pandas._typing import NpDtype from pandas.core.dtypes.common import ( is_integer_dtype, is_list_like, is_object_dtype, pandas_dtype, ) from pandas.core.arrays import SparseArray from pandas.core.arrays.categorical import factorize_from_iterable from pandas.core.frame import DataFrame from pandas.core.indexes.api import ( Index, default_index, ) from pandas.core.series import Series def _get_dummies_1d( data, prefix, prefix_sep: str | Iterable[str] | dict[str, str] = "_", dummy_na: bool = False, sparse: bool = False, drop_first: bool = False, dtype: NpDtype | None = None, ) -> DataFrame: from pandas.core.reshape.concat import concat # Series avoids inconsistent NaN handling codes, levels = factorize_from_iterable(Series(data, copy=False)) if dtype is None: dtype = np.dtype(bool) _dtype = pandas_dtype(dtype) if is_object_dtype(_dtype): raise ValueError("dtype=object is not a valid dtype for get_dummies") def get_empty_frame(data) -> DataFrame: index: Index | np.ndarray if isinstance(data, Series): index = data.index else: index = default_index(len(data)) return DataFrame(index=index) # if all NaN if not dummy_na and len(levels) == 0: return get_empty_frame(data) codes = codes.copy() if dummy_na: codes[codes == -1] = len(levels) levels = levels.insert(len(levels), np.nan) # if dummy_na, we just fake a nan level. drop_first will drop it again if drop_first and len(levels) == 1: return get_empty_frame(data) number_of_cols = len(levels) if prefix is None: dummy_cols = levels else: dummy_cols = Index([f"{prefix}{prefix_sep}{level}" for level in levels]) index: Index | None if isinstance(data, Series): index = data.index else: index = None if sparse: fill_value: bool | float if is_integer_dtype(dtype): fill_value = 0 elif dtype == np.dtype(bool): fill_value = False else: fill_value = 0.0 sparse_series = [] N = len(data) sp_indices: list[list] = [[] for _ in range(len(dummy_cols))] mask = codes != -1 codes = codes[mask] n_idx = np.arange(N)[mask] for ndx, code in zip(n_idx, codes): sp_indices[code].append(ndx) if drop_first: # remove first categorical level to avoid perfect collinearity # GH12042 sp_indices = sp_indices[1:] dummy_cols = dummy_cols[1:] for col, ixs in zip(dummy_cols, sp_indices): sarr = SparseArray( np.ones(len(ixs), dtype=dtype), sparse_index=IntIndex(N, ixs), fill_value=fill_value, dtype=dtype, ) sparse_series.append(Series(data=sarr, index=index, name=col, copy=False)) return concat(sparse_series, axis=1, copy=False) else: # take on axis=1 + transpose to ensure ndarray layout is column-major eye_dtype: NpDtype if isinstance(_dtype, np.dtype): eye_dtype = _dtype else: eye_dtype = np.bool_ dummy_mat = np.eye(number_of_cols, dtype=eye_dtype).take(codes, axis=1).T if not dummy_na: # reset NaN GH4446 dummy_mat[codes == -1] = 0 if drop_first: # remove first GH12042 dummy_mat = dummy_mat[:, 1:] dummy_cols = dummy_cols[1:] return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype) class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]] class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. 
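The dense branch of ``_get_dummies_1d`` above builds the indicator matrix with ``np.eye(...).take(codes, axis=1).T`` and then zeroes out the rows of missing values. A minimal sketch of that idea, reusing the same internal helper purely to mirror the source (an illustration, not the public API):

import numpy as np
import pandas as pd
from pandas.core.arrays.categorical import factorize_from_iterable

data = pd.Series(["a", "b", "a", None, "c"])
codes, levels = factorize_from_iterable(data)

# Each code selects one column of the identity matrix, giving a one-hot row
# per input value; code -1 marks a missing value and is reset to 0 afterwards.
dummy_mat = np.eye(len(levels), dtype=np.bool_).take(codes, axis=1).T
dummy_mat[codes == -1] = 0

print(pd.DataFrame(dummy_mat, columns=levels))
print(pd.get_dummies(data))  # the public API should yield the same layout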
Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
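``_info_repr`` above switches ``repr`` to the ``info()`` view when ``display.large_repr`` is set to ``"info"`` and the frame does not fit the configured display limits. A quick illustration (my own example, using only the public options named in the code above):

import pandas as pd

df = pd.DataFrame({"a": range(1000)})

with pd.option_context("display.large_repr", "info", "display.max_rows", 10):
    # 1000 rows exceed max_rows, so repr falls back to the info-style summary.
    print(repr(df))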
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. 
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
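The renaming rule just described can be seen directly: invalid or duplicate column names come back as positional ``_1``, ``_2``, ... fields. A small illustration (my own example):

import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=["ok", "not ok", "ok"])
for row in df.itertuples(index=False):
    print(row)          # Pandas(ok=1, _1=2, _2=3)
    print(row._fields)  # ('ok', '_1', '_2')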
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
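The alignment check in ``dot`` above raises once the union of the frame's columns and the other operand's index is larger than either side. A short illustration (my own example) of both the aligned and misaligned cases:

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
s_ok = pd.Series([10, 20], index=["a", "b"])
s_bad = pd.Series([10, 20], index=["a", "c"])

print(df @ s_ok)  # labels align, so this is an ordinary matrix-vector product

try:
    df @ s_bad
except ValueError as err:
    print(err)  # "matrices are not aligned"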
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
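The ``index`` keyword of ``to_dict`` described above (added in 2.0) only applies to the ``'split'`` and ``'tight'`` orients. A short illustration (my own example) of dropping the index entry and of the error path for other orients:

import pandas as pd

df = pd.DataFrame({"col1": [1, 2], "col2": [0.5, 0.75]}, index=["row1", "row2"])

print(df.to_dict("split", index=False))
# expected: {'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]}

try:
    df.to_dict("records", index=False)
except ValueError as err:
    # index=False is expected to be rejected for orients other than 'split'/'tight'
    print(err)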
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
            if index_int < index_len:
                dtype_mapping = index_dtypes
                name = index_names[index_int]
            else:
                index_int -= index_len
                dtype_mapping = column_dtypes
                name = self.columns[index_int]

            # We have a dictionary, so we get the data type
            # associated with the index or column (which can
            # be denoted by its name in the DataFrame or its
            # position in DataFrame's array of indices or
            # columns, whichever is applicable).
            if is_dict_like(dtype_mapping):
                if name in dtype_mapping:
                    dtype_mapping = dtype_mapping[name]
                elif index_int in dtype_mapping:
                    dtype_mapping = dtype_mapping[index_int]
                else:
                    dtype_mapping = None

            # If no mapping can be found, use the array's
            # dtype attribute for formatting.
            #
            # A valid dtype must either be a type or
            # a string naming a type.
            if dtype_mapping is None:
                formats.append(v.dtype)
            elif isinstance(dtype_mapping, (type, np.dtype, str)):
                # error: Argument 1 to "append" of "list" has incompatible
                # type "Union[type, dtype[Any], str]"; expected "dtype[Any]"
                formats.append(dtype_mapping)  # type: ignore[arg-type]
            else:
                element = "row" if i < index_len else "column"
                msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
                raise ValueError(msg)

        return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})

    def _from_arrays(
        cls,
        arrays,
        columns,
        index,
        dtype: Dtype | None = None,
        verify_integrity: bool = True,
    ) -> DataFrame:
        """
        Create DataFrame from a list of arrays corresponding to the columns.

        Parameters
        ----------
        arrays : list-like of arrays
            Each array in the list corresponds to one column, in order.
        columns : list-like, Index
            The column names for the resulting DataFrame.
        index : list-like, Index
            The row labels for the resulting DataFrame.
        dtype : dtype, optional
            Optional dtype to enforce for all arrays.
        verify_integrity : bool, default True
            Validate and homogenize all input. If set to False, it is assumed
            that all elements of `arrays` are actual arrays, as they will be
            stored in a block (numpy ndarray or ExtensionArray), that they
            have the same length as and are aligned with the index, and that
            `columns` and `index` are guaranteed to be Index objects.

        Returns
        -------
        DataFrame
        """
        if dtype is not None:
            dtype = pandas_dtype(dtype)

        manager = get_option("mode.data_manager")
        columns = ensure_index(columns)
        if len(columns) != len(arrays):
            raise ValueError("len(columns) must match len(arrays)")
        mgr = arrays_to_mgr(
            arrays,
            columns,
            index,
            dtype=dtype,
            verify_integrity=verify_integrity,
            typ=manager,
        )
        return cls(mgr)

        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path",
    )
    def to_stata(
        self,
        path: FilePath | WriteBuffer[bytes],
        *,
        convert_dates: dict[Hashable, str] | None = None,
        write_index: bool = True,
        byteorder: str | None = None,
        time_stamp: datetime.datetime | None = None,
        data_label: str | None = None,
        variable_labels: dict[Hashable, str] | None = None,
        version: int | None = 114,
        convert_strl: Sequence[Hashable] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
        value_labels: dict[Hashable, dict[float, str]] | None = None,
    ) -> None:
        """
        Export DataFrame object to Stata dta format.

        Writes the DataFrame to a Stata dataset file.
        "dta" files contain a Stata dataset.

        Parameters
        ----------
        path : str, path object, or buffer
            String, path object (implementing ``os.PathLike[str]``), or
            file-like object implementing a binary ``write()`` function.

        convert_dates : dict
            Dictionary mapping columns containing datetime types to Stata
            internal format to use when writing the dates.
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
        >>> print(df.to_markdown(tablefmt="grid"))
        +----+------------+------------+
        |    | animal_1   | animal_2   |
        +====+============+============+
        |  0 | elk        | dog        |
        +----+------------+------------+
        |  1 | pig        | quetzal    |
        +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If None, the result is
            returned as bytes. If a string or path, it will be used as Root Directory
            path when writing a partitioned dataset.

            .. versionchanged:: 1.2.0
               Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
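        * As a rough round-trip sketch (assuming ``pyarrow`` is installed),
          ``pd.read_orc(io.BytesIO(df.to_orc()))`` reconstructs the frame,
          subject to the timezone caveat above.
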
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
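        # Illustrative sketch: masks such as ``df[df["A"] > 0]`` or
        # ``df[[True, False, True]]`` take this path; a boolean Series key is
        # validated (and reindexed if needed) against self.index below.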
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
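
        Examples
        --------
        A minimal sketch of positional assignment (values are illustrative):

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(1, [30, 40])  # replace the column at position 1
        >>> df
           a   b
        0  1  30
        1  2  40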
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
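
        For example (an illustrative sketch, not a doctest): with
        ``df = pd.DataFrame(index=["x", "y"])``, assigning
        ``df["c"] = pd.Series({"y": 2})`` stores NaN for "x" and 2 for "y",
        because the Series is conformed (reindexed) to ``df.index`` before
        being stored.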
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
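        (Under the default parser, ``df.query('A > 0 & B < 2')`` is therefore
        read as ``(A > 0) & (B < 2)``, whereas plain Python binds ``&`` more
        tightly, as ``A > (0 & B) < 2``.)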
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 """
# ---------------------------------------------------------------------- # Reindex-based selection methods
# ---------------------------------------------------------------------- # Sorting
# ---------------------------------------------------------------------- # Arithmetic Methods
# ---------------------------------------------------------------------- # Function application
# ---------------------------------------------------------------------- # Merging / joining methods
# ---------------------------------------------------------------------- # Statistical methods, etc.
# ---------------------------------------------------------------------- # ndarray-like stats methods
# ---------------------------------------------------------------------- # Add index and columns
# ---------------------------------------------------------------------- # Add plotting methods to DataFrame
# ---------------------------------------------------------------------- # Internal Interface Methods
def concat( objs: Iterable[DataFrame] | Mapping[HashableT, DataFrame], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame: ...
def concat( objs: Iterable[Series] | Mapping[HashableT, Series], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> Series: ...
def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame | Series: ...
def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Literal[1, "columns"], join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame: ...
def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Axis = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame | Series: ...
def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Axis = 0, join: str = "outer", ignore_index: bool = False, keys=None, levels=None, names=None, verify_integrity: bool = False, sort: bool = False, copy: bool | None = None, ) -> DataFrame | Series: """ Concatenate pandas objects along a particular axis. Allows optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number.
Parameters ---------- objs : a sequence or mapping of Series or DataFrame objects If a mapping is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default False Sort non-concatenation axis if it is not already aligned. copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__. It is not recommended to build DataFrames by adding single rows in a for loop. Build a list of rows and make a DataFrame in a single concat. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. >>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. 
Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] Append a single row to the end of a ``DataFrame`` object. >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0]) >>> df7 a b 0 1 2 >>> new_row = pd.Series({'a': 3, 'b': 4}) >>> new_row a 3 b 4 dtype: int64 >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True) a b 0 1 2 1 3 4 """ if copy is None: if using_copy_on_write(): copy = False else: copy = True elif copy and using_copy_on_write(): copy = False op = _Concatenator( objs, axis=axis, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, copy=copy, sort=sort, ) return op.get_result() The provided code snippet includes necessary dependencies for implementing the `get_dummies` function. Write a Python function `def get_dummies( data, prefix=None, prefix_sep: str | Iterable[str] | dict[str, str] = "_", dummy_na: bool = False, columns=None, sparse: bool = False, drop_first: bool = False, dtype: NpDtype | None = None, ) -> DataFrame` to solve the following problem: Convert categorical variable into dummy/indicator variables. Each variable is converted in as many 0/1 variables as there are different values. Columns in the output are each named after a value; if the input is a DataFrame, the name of the original variable is prepended to the value. Parameters ---------- data : array-like, Series, or DataFrame Data of which to get dummy indicators. prefix : str, list of str, or dict of str, default None String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : str, default '_' If appending prefix, separator/delimiter to use. Or pass a list or dictionary as with `prefix`. dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object`, `string`, or `category` dtype will be converted. sparse : bool, default False Whether the dummy-encoded columns should be backed by a :class:`SparseArray` (True) or a regular NumPy array (False). drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. dtype : dtype, default bool Data type for new columns. 
Only a single dtype is allowed. Returns ------- DataFrame Dummy-coded data. If `data` contains other columns than the dummy-coded one(s), these will be prepended, unaltered, to the result. See Also -------- Series.str.get_dummies : Convert Series of strings to dummy codes. :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``. Notes ----- Reference :ref:`the user guide <reshaping.dummies>` for more examples. Examples -------- >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) a b c 0 True False False 1 False True False 2 False False True 3 True False False >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) a b 0 True False 1 False True 2 False False >>> pd.get_dummies(s1, dummy_na=True) a b NaN 0 True False False 1 False True False 2 False False True >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c 0 1 True False False True False 1 2 False True True False False 2 3 True False False False True >>> pd.get_dummies(pd.Series(list('abcaa'))) a b c 0 True False False 1 False True False 2 False False True 3 True False False 4 True False False >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) b c 0 False False 1 True False 2 False True 3 False False 4 False False >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) a b c 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 Here is the function: def get_dummies( data, prefix=None, prefix_sep: str | Iterable[str] | dict[str, str] = "_", dummy_na: bool = False, columns=None, sparse: bool = False, drop_first: bool = False, dtype: NpDtype | None = None, ) -> DataFrame: """ Convert categorical variable into dummy/indicator variables. Each variable is converted in as many 0/1 variables as there are different values. Columns in the output are each named after a value; if the input is a DataFrame, the name of the original variable is prepended to the value. Parameters ---------- data : array-like, Series, or DataFrame Data of which to get dummy indicators. prefix : str, list of str, or dict of str, default None String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : str, default '_' If appending prefix, separator/delimiter to use. Or pass a list or dictionary as with `prefix`. dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object`, `string`, or `category` dtype will be converted. sparse : bool, default False Whether the dummy-encoded columns should be backed by a :class:`SparseArray` (True) or a regular NumPy array (False). drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. dtype : dtype, default bool Data type for new columns. Only a single dtype is allowed. Returns ------- DataFrame Dummy-coded data. If `data` contains other columns than the dummy-coded one(s), these will be prepended, unaltered, to the result. See Also -------- Series.str.get_dummies : Convert Series of strings to dummy codes. :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``. Notes ----- Reference :ref:`the user guide <reshaping.dummies>` for more examples. 
Examples -------- >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) a b c 0 True False False 1 False True False 2 False False True 3 True False False >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) a b 0 True False 1 False True 2 False False >>> pd.get_dummies(s1, dummy_na=True) a b NaN 0 True False False 1 False True False 2 False False True >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c 0 1 True False False True False 1 2 False True True False False 2 3 True False False False True >>> pd.get_dummies(pd.Series(list('abcaa'))) a b c 0 True False False 1 False True False 2 False False True 3 True False False 4 True False False >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) b c 0 False False 1 True False 2 False True 3 False False 4 False False >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) a b c 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ from pandas.core.reshape.concat import concat dtypes_to_encode = ["object", "string", "category"] if isinstance(data, DataFrame): # determine columns being encoded if columns is None: data_to_encode = data.select_dtypes(include=dtypes_to_encode) elif not is_list_like(columns): raise TypeError("Input must be a list-like for parameter `columns`") else: data_to_encode = data[columns] # validate prefixes and separator to avoid silently dropping cols def check_len(item, name): if is_list_like(item): if not len(item) == data_to_encode.shape[1]: len_msg = ( f"Length of '{name}' ({len(item)}) did not match the " "length of the columns being encoded " f"({data_to_encode.shape[1]})." ) raise ValueError(len_msg) check_len(prefix, "prefix") check_len(prefix_sep, "prefix_sep") if isinstance(prefix, str): prefix = itertools.cycle([prefix]) if isinstance(prefix, dict): prefix = [prefix[col] for col in data_to_encode.columns] if prefix is None: prefix = data_to_encode.columns # validate separators if isinstance(prefix_sep, str): prefix_sep = itertools.cycle([prefix_sep]) elif isinstance(prefix_sep, dict): prefix_sep = [prefix_sep[col] for col in data_to_encode.columns] with_dummies: list[DataFrame] if data_to_encode.shape == data.shape: # Encoding the entire df, do not prepend any dropped columns with_dummies = [] elif columns is not None: # Encoding only cols specified in columns. Get all cols not in # columns to prepend to result. with_dummies = [data.drop(columns, axis=1)] else: # Encoding only object and category dtype columns. Get remaining # columns to prepend to result. with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)] for col, pre, sep in zip(data_to_encode.items(), prefix, prefix_sep): # col is (column_name, column), use just column data here dummy = _get_dummies_1d( col[1], prefix=pre, prefix_sep=sep, dummy_na=dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype, ) with_dummies.append(dummy) result = concat(with_dummies, axis=1) else: result = _get_dummies_1d( data, prefix, prefix_sep, dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype, ) return result
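For quick orientation, a minimal usage sketch of the function above follows. The data and column names are illustrative only; the sketch assumes a standard pandas installation where this function is exposed as `pd.get_dummies`, consistent with the docstring examples.

import numpy as np
import pandas as pd

# Illustrative frame: two encodable (object-dtype) columns plus one numeric
# column that should be carried through unchanged.
df = pd.DataFrame(
    {
        "color": ["red", "blue", "red"],
        "size": ["S", "M", "S"],
        "price": [10.0, 20.0, 15.0],
    }
)

# `prefix` may be a dict keyed by column name; a plain-string `prefix_sep`
# is applied to every encoded column. Columns that are not object/string/
# category dtype ("price") are prepended to the dummy columns unaltered.
out = pd.get_dummies(df, prefix={"color": "c", "size": "s"}, prefix_sep="-", dtype=np.uint8)
print(out)

# drop_first=True produces k-1 indicator columns out of k levels.
print(pd.get_dummies(df["size"], drop_first=True))

As in the docstring examples, passing a bare Series or list instead of a DataFrame skips the prefix handling entirely and names the output columns after the observed levels.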
Convert categorical variable into dummy/indicator variables. Each variable is converted in as many 0/1 variables as there are different values. Columns in the output are each named after a value; if the input is a DataFrame, the name of the original variable is prepended to the value. Parameters ---------- data : array-like, Series, or DataFrame Data of which to get dummy indicators. prefix : str, list of str, or dict of str, default None String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : str, default '_' If appending prefix, separator/delimiter to use. Or pass a list or dictionary as with `prefix`. dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object`, `string`, or `category` dtype will be converted. sparse : bool, default False Whether the dummy-encoded columns should be backed by a :class:`SparseArray` (True) or a regular NumPy array (False). drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. dtype : dtype, default bool Data type for new columns. Only a single dtype is allowed. Returns ------- DataFrame Dummy-coded data. If `data` contains other columns than the dummy-coded one(s), these will be prepended, unaltered, to the result. See Also -------- Series.str.get_dummies : Convert Series of strings to dummy codes. :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``. Notes ----- Reference :ref:`the user guide <reshaping.dummies>` for more examples. Examples -------- >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) a b c 0 True False False 1 False True False 2 False False True 3 True False False >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) a b 0 True False 1 False True 2 False False >>> pd.get_dummies(s1, dummy_na=True) a b NaN 0 True False False 1 False True False 2 False False True >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c 0 1 True False False True False 1 2 False True True False False 2 3 True False False False True >>> pd.get_dummies(pd.Series(list('abcaa'))) a b c 0 True False False 1 False True False 2 False False True 3 True False False 4 True False False >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) b c 0 False False 1 True False 2 False True 3 False False 4 False False >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) a b c 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0
173,144
from __future__ import annotations from collections import defaultdict import itertools from typing import ( Hashable, Iterable, ) import numpy as np from pandas._libs.sparse import IntIndex from pandas._typing import NpDtype from pandas.core.dtypes.common import ( is_integer_dtype, is_list_like, is_object_dtype, pandas_dtype, ) from pandas.core.arrays import SparseArray from pandas.core.arrays.categorical import factorize_from_iterable from pandas.core.frame import DataFrame from pandas.core.indexes.api import ( Index, default_index, ) from pandas.core.series import Series class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] def __init__(self, **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... def __init__( self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... def __missing__(self, key: _KT) -> _VT: ... def copy(self: _S) -> _S: ... class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. 
read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by 
default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. 
pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. """ from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 
'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. 
GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. 
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
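            # For example, with a 2-level MultiIndex and 3 data columns,
            # i = 0, 1 fall under index_dtypes (named via index_names) and
            # i = 2, 3, 4 fall under column_dtypes (re-based to columns 0-2).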
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file. 
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 """ ) **_shared_doc_kwargs, extended_summary_sub=" column or", axis_description_sub=", and 1 identifies the columns", see_also_sub=" or columns", ) ) # ---------------------------------------------------------------------- # Reindex-based selection methods # ---------------------------------------------------------------------- # Sorting # error: Signature of "sort_values" incompatible with supertype "NDFrame" # TODO: Just move the sort_values doc here. ) # ---------------------------------------------------------------------- # Arithmetic Methods ) ) ) # ---------------------------------------------------------------------- # Function application ) # error: Signature of "any" incompatible with supertype "NDFrame" [override] # error: Missing return statement ) # ---------------------------------------------------------------------- # Merging / joining methods # ---------------------------------------------------------------------- # Statistical methods, etc. # ---------------------------------------------------------------------- # ndarray-like stats methods # ---------------------------------------------------------------------- # Add index and columns # ---------------------------------------------------------------------- # Add plotting methods to DataFrame # ---------------------------------------------------------------------- # Internal Interface Methods DataFrame def concat( objs: Iterable[DataFrame] | Mapping[HashableT, DataFrame], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame: ... def concat( objs: Iterable[Series] | Mapping[HashableT, Series], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> Series: ... def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame | Series: ... def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Literal[1, "columns"], join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame: ... def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Axis = ..., join: str = ..., ignore_index: bool = ..., keys=..., levels=..., names=..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., ) -> DataFrame | Series: ... def concat( objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame], *, axis: Axis = 0, join: str = "outer", ignore_index: bool = False, keys=None, levels=None, names=None, verify_integrity: bool = False, sort: bool = False, copy: bool | None = None, ) -> DataFrame | Series: """ Concatenate pandas objects along a particular axis. Allows optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. 
Parameters ---------- objs : a sequence or mapping of Series or DataFrame objects If a mapping is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default False Sort non-concatenation axis if it is not already aligned. copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__. It is not recommended to build DataFrames by adding single rows in a for loop. Build a list of rows and make a DataFrame in a single concat. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. >>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. 
Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] Append a single row to the end of a ``DataFrame`` object. >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0]) >>> df7 a b 0 1 2 >>> new_row = pd.Series({'a': 3, 'b': 4}) >>> new_row a 3 b 4 dtype: int64 >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True) a b 0 1 2 1 3 4 """ if copy is None: if using_copy_on_write(): copy = False else: copy = True elif copy and using_copy_on_write(): copy = False op = _Concatenator( objs, axis=axis, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, copy=copy, sort=sort, ) return op.get_result() The provided code snippet includes necessary dependencies for implementing the `from_dummies` function. Write a Python function `def from_dummies( data: DataFrame, sep: None | str = None, default_category: None | Hashable | dict[str, Hashable] = None, ) -> DataFrame` to solve the following problem: Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables. Inverts the operation performed by :func:`~pandas.get_dummies`. .. versionadded:: 1.5.0 Parameters ---------- data : DataFrame Data which contains dummy-coded variables in form of integer columns of 1's and 0's. sep : str, default None Separator used in the column names of the dummy categories they are character indicating the separation of the categorical names from the prefixes. For example, if your column names are 'prefix_A' and 'prefix_B', you can strip the underscore by specifying sep='_'. default_category : None, Hashable or dict of Hashables, default None The default category is the implied category when a value has none of the listed categories specified with a one, i.e. if all dummies in a row are zero. Can be a single value for all variables or a dict directly mapping the default categories to a prefix of a variable. Returns ------- DataFrame Categorical data decoded from the dummy input-data. Raises ------ ValueError * When the input ``DataFrame`` ``data`` contains NA values. * When the input ``DataFrame`` ``data`` contains column names with separators that do not match the separator specified with ``sep``. * When a ``dict`` passed to ``default_category`` does not include an implied category for each prefix. * When a value in ``data`` has more than one category assigned to it. * When ``default_category=None`` and a value in ``data`` has no category assigned to it. 
TypeError * When the input ``data`` is not of type ``DataFrame``. * When the input ``DataFrame`` ``data`` contains non-dummy data. * When the passed ``sep`` is of a wrong data type. * When the passed ``default_category`` is of a wrong data type. See Also -------- :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes. :class:`~pandas.Categorical` : Represent a categorical variable in classic. Notes ----- The columns of the passed dummy data should only include 1's and 0's, or boolean values. Examples -------- >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], ... "c": [0, 0, 1, 0]}) >>> df a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 >>> pd.from_dummies(df) 0 a 1 b 2 c 3 a >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 1]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 1 0 0 0 1 >>> pd.from_dummies(df, sep="_") col1 col2 0 a b 1 b a 2 a c >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 0]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 0 0 0 0 0 >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"}) col1 col2 0 a b 1 b a 2 d e Here is the function: def from_dummies( data: DataFrame, sep: None | str = None, default_category: None | Hashable | dict[str, Hashable] = None, ) -> DataFrame: """ Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables. Inverts the operation performed by :func:`~pandas.get_dummies`. .. versionadded:: 1.5.0 Parameters ---------- data : DataFrame Data which contains dummy-coded variables in form of integer columns of 1's and 0's. sep : str, default None Separator used in the column names of the dummy categories they are character indicating the separation of the categorical names from the prefixes. For example, if your column names are 'prefix_A' and 'prefix_B', you can strip the underscore by specifying sep='_'. default_category : None, Hashable or dict of Hashables, default None The default category is the implied category when a value has none of the listed categories specified with a one, i.e. if all dummies in a row are zero. Can be a single value for all variables or a dict directly mapping the default categories to a prefix of a variable. Returns ------- DataFrame Categorical data decoded from the dummy input-data. Raises ------ ValueError * When the input ``DataFrame`` ``data`` contains NA values. * When the input ``DataFrame`` ``data`` contains column names with separators that do not match the separator specified with ``sep``. * When a ``dict`` passed to ``default_category`` does not include an implied category for each prefix. * When a value in ``data`` has more than one category assigned to it. * When ``default_category=None`` and a value in ``data`` has no category assigned to it. TypeError * When the input ``data`` is not of type ``DataFrame``. * When the input ``DataFrame`` ``data`` contains non-dummy data. * When the passed ``sep`` is of a wrong data type. * When the passed ``default_category`` is of a wrong data type. See Also -------- :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes. :class:`~pandas.Categorical` : Represent a categorical variable in classic. Notes ----- The columns of the passed dummy data should only include 1's and 0's, or boolean values. Examples -------- >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], ... 
"c": [0, 0, 1, 0]}) >>> df a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 >>> pd.from_dummies(df) 0 a 1 b 2 c 3 a >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 1]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 1 0 0 0 1 >>> pd.from_dummies(df, sep="_") col1 col2 0 a b 1 b a 2 a c >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 0]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 0 0 0 0 0 >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"}) col1 col2 0 a b 1 b a 2 d e """ from pandas.core.reshape.concat import concat if not isinstance(data, DataFrame): raise TypeError( "Expected 'data' to be a 'DataFrame'; " f"Received 'data' of type: {type(data).__name__}" ) if data.isna().any().any(): raise ValueError( "Dummy DataFrame contains NA value in column: " f"'{data.isna().any().idxmax()}'" ) # index data with a list of all columns that are dummies try: data_to_decode = data.astype("boolean", copy=False) except TypeError: raise TypeError("Passed DataFrame contains non-dummy data") # collect prefixes and get lists to slice data for each prefix variables_slice = defaultdict(list) if sep is None: variables_slice[""] = list(data.columns) elif isinstance(sep, str): for col in data_to_decode.columns: prefix = col.split(sep)[0] if len(prefix) == len(col): raise ValueError(f"Separator not specified for column: {col}") variables_slice[prefix].append(col) else: raise TypeError( "Expected 'sep' to be of type 'str' or 'None'; " f"Received 'sep' of type: {type(sep).__name__}" ) if default_category is not None: if isinstance(default_category, dict): if not len(default_category) == len(variables_slice): len_msg = ( f"Length of 'default_category' ({len(default_category)}) " f"did not match the length of the columns being encoded " f"({len(variables_slice)})" ) raise ValueError(len_msg) elif isinstance(default_category, Hashable): default_category = dict( zip(variables_slice, [default_category] * len(variables_slice)) ) else: raise TypeError( "Expected 'default_category' to be of type " "'None', 'Hashable', or 'dict'; " "Received 'default_category' of type: " f"{type(default_category).__name__}" ) cat_data = {} for prefix, prefix_slice in variables_slice.items(): if sep is None: cats = prefix_slice.copy() else: cats = [col[len(prefix + sep) :] for col in prefix_slice] assigned = data_to_decode.loc[:, prefix_slice].sum(axis=1) if any(assigned > 1): raise ValueError( "Dummy DataFrame contains multi-assignment(s); " f"First instance in row: {assigned.idxmax()}" ) if any(assigned == 0): if isinstance(default_category, dict): cats.append(default_category[prefix]) else: raise ValueError( "Dummy DataFrame contains unassigned value(s); " f"First instance in row: {assigned.idxmin()}" ) data_slice = concat( (data_to_decode.loc[:, prefix_slice], assigned == 0), axis=1 ) else: data_slice = data_to_decode.loc[:, prefix_slice] cats_array = np.array(cats, dtype="object") # get indices of True entries along axis=1 cat_data[prefix] = cats_array[data_slice.to_numpy().nonzero()[1]] return DataFrame(cat_data)
Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables. Inverts the operation performed by :func:`~pandas.get_dummies`. .. versionadded:: 1.5.0 Parameters ---------- data : DataFrame Data which contains dummy-coded variables in form of integer columns of 1's and 0's. sep : str, default None Separator used in the column names of the dummy categories they are character indicating the separation of the categorical names from the prefixes. For example, if your column names are 'prefix_A' and 'prefix_B', you can strip the underscore by specifying sep='_'. default_category : None, Hashable or dict of Hashables, default None The default category is the implied category when a value has none of the listed categories specified with a one, i.e. if all dummies in a row are zero. Can be a single value for all variables or a dict directly mapping the default categories to a prefix of a variable. Returns ------- DataFrame Categorical data decoded from the dummy input-data. Raises ------ ValueError * When the input ``DataFrame`` ``data`` contains NA values. * When the input ``DataFrame`` ``data`` contains column names with separators that do not match the separator specified with ``sep``. * When a ``dict`` passed to ``default_category`` does not include an implied category for each prefix. * When a value in ``data`` has more than one category assigned to it. * When ``default_category=None`` and a value in ``data`` has no category assigned to it. TypeError * When the input ``data`` is not of type ``DataFrame``. * When the input ``DataFrame`` ``data`` contains non-dummy data. * When the passed ``sep`` is of a wrong data type. * When the passed ``default_category`` is of a wrong data type. See Also -------- :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes. :class:`~pandas.Categorical` : Represent a categorical variable in classic. Notes ----- The columns of the passed dummy data should only include 1's and 0's, or boolean values. Examples -------- >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], ... "c": [0, 0, 1, 0]}) >>> df a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 >>> pd.from_dummies(df) 0 a 1 b 2 c 3 a >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 1]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 1 0 0 0 1 >>> pd.from_dummies(df, sep="_") col1 col2 0 a b 1 b a 2 a c >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 0]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 0 0 0 0 0 >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"}) col1 col2 0 a b 1 b a 2 d e
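Since ``from_dummies`` inverts ``get_dummies``, a round-trip sketch (assuming pandas >= 1.5, where ``from_dummies`` is available) looks like this; the frame name ``df`` is illustrative:

import pandas as pd

df = pd.DataFrame({"col": ["a", "b", "a", "c"]})
dummies = pd.get_dummies(df)                  # columns: col_a, col_b, col_c
restored = pd.from_dummies(dummies, sep="_")  # a single 'col' column again
# restored.equals(df) is expected to be True (values, index and column name match)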
173,145
from __future__ import annotations from collections import abc from typing import ( TYPE_CHECKING, Callable, Hashable, Iterable, Literal, Mapping, cast, overload, ) import numpy as np from pandas._config import using_copy_on_write from pandas._typing import ( Axis, AxisInt, HashableT, ) from pandas.util._decorators import cache_readonly from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.inference import is_bool from pandas.core.dtypes.missing import isna from pandas.core.arrays.categorical import ( factorize_from_iterable, factorize_from_iterables, ) import pandas.core.common as com from pandas.core.indexes.api import ( Index, MultiIndex, all_indexes_same, default_index, ensure_index, get_objs_combined_axis, get_unanimous_names, ) from pandas.core.internals import concatenate_managers def _concat_indexes(indexes) -> Index: return indexes[0].append(indexes[1:]) def isna(obj: Scalar) -> bool: ... def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) ) ) def factorize_from_iterable(values) -> tuple[np.ndarray, Index]: """ Factorize an input `values` into `categories` and `codes`. Preserves categorical dtype in `categories`. Parameters ---------- values : list-like Returns ------- codes : ndarray categories : Index If `values` has a categorical dtype, then `categories` is a CategoricalIndex keeping the categories and order of `values`. 
""" from pandas import CategoricalIndex if not is_list_like(values): raise TypeError("Input must be list-like") categories: Index if is_categorical_dtype(values): values = extract_array(values) # The Categorical we want to build has the same categories # as values but its codes are by def [0, ..., len(n_categories) - 1] cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype) cat = Categorical.from_codes(cat_codes, dtype=values.dtype) categories = CategoricalIndex(cat) codes = values.codes else: # The value of ordered is irrelevant since we don't use cat as such, # but only the resulting categories, the order of which is independent # from ordered. Set ordered to False as default. See GH #15457 cat = Categorical(values, ordered=False) categories = cat.categories codes = cat.codes return codes, categories def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]: """ A higher-level wrapper over `factorize_from_iterable`. Parameters ---------- iterables : list-like of list-likes Returns ------- codes : list of ndarrays categories : list of Indexes Notes ----- See `factorize_from_iterable` for more info. """ if len(iterables) == 0: # For consistency, it should return two empty lists. return [], [] codes, categories = zip(*(factorize_from_iterable(it) for it in iterables)) return list(codes), list(categories) def all_indexes_same(indexes) -> bool: """ Determine if all indexes contain the same elements. Parameters ---------- indexes : iterable of Index objects Returns ------- bool True if all indexes contain the same elements, False otherwise. """ itr = iter(indexes) first = next(itr) return all(first.equals(index) for index in itr) def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex: if (levels is None and isinstance(keys[0], tuple)) or ( levels is not None and len(levels) > 1 ): zipped = list(zip(*keys)) if names is None: names = [None] * len(zipped) if levels is None: _, levels = factorize_from_iterables(zipped) else: levels = [ensure_index(x) for x in levels] else: zipped = [keys] if names is None: names = [None] if levels is None: levels = [ensure_index(keys).unique()] else: levels = [ensure_index(x) for x in levels] for level in levels: if not level.is_unique: raise ValueError(f"Level values not unique: {level.tolist()}") if not all_indexes_same(indexes) or not all(level.is_unique for level in levels): codes_list = [] # things are potentially different sizes, so compute the exact codes # for each level and pass those to MultiIndex.from_arrays for hlevel, level in zip(zipped, levels): to_concat = [] for key, index in zip(hlevel, indexes): # Find matching codes, include matching nan values as equal. 
mask = (isna(level) & isna(key)) | (level == key) if not mask.any(): raise ValueError(f"Key {key} not in level {level}") i = np.nonzero(mask)[0][0] to_concat.append(np.repeat(i, len(index))) codes_list.append(np.concatenate(to_concat)) concat_index = _concat_indexes(indexes) # these go at the end if isinstance(concat_index, MultiIndex): levels.extend(concat_index.levels) codes_list.extend(concat_index.codes) else: codes, categories = factorize_from_iterable(concat_index) levels.append(categories) codes_list.append(codes) if len(names) == len(levels): names = list(names) else: # make sure that all of the passed indices have the same nlevels if not len({idx.nlevels for idx in indexes}) == 1: raise AssertionError( "Cannot concat indices that do not have the same number of levels" ) # also copies names = list(names) + list(get_unanimous_names(*indexes)) return MultiIndex( levels=levels, codes=codes_list, names=names, verify_integrity=False ) new_index = indexes[0] n = len(new_index) kpieces = len(indexes) # also copies new_names = list(names) new_levels = list(levels) # construct codes new_codes = [] # do something a bit more speedy for hlevel, level in zip(zipped, levels): hlevel = ensure_index(hlevel) mapped = level.get_indexer(hlevel) mask = mapped == -1 if mask.any(): raise ValueError(f"Values not found in passed level: {hlevel[mask]!s}") new_codes.append(np.repeat(mapped, n)) if isinstance(new_index, MultiIndex): new_levels.extend(new_index.levels) new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes]) else: new_levels.append(new_index.unique()) single_codes = new_index.unique().get_indexer(new_index) new_codes.append(np.tile(single_codes, kpieces)) if len(new_names) < len(new_levels): new_names.extend(new_index.names) return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False )
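For the fast path at the bottom (all indexes equal and levels unique), the codes arithmetic reduces to a repeat/tile pattern. A rough sketch under that assumption; the variable names here are illustrative, not from the source:

import numpy as np
import pandas as pd

# Two equal-length objects sharing the same index; the concat keys become
# the new outer level.
idx = pd.Index(["x", "y"])
keys = pd.Index(["a", "b"])

level = keys.unique()
mapped = level.get_indexer(keys)                 # [0, 1]
outer_codes = np.repeat(mapped, len(idx))        # [0, 0, 1, 1]
inner_codes = np.tile(idx.unique().get_indexer(idx), len(keys))  # [0, 1, 0, 1]

mi = pd.MultiIndex(
    levels=[level, idx.unique()],
    codes=[outer_codes, inner_codes],
    verify_integrity=False,
)
print(list(mi))  # [('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')]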
173,146
from __future__ import annotations import re from typing import ( TYPE_CHECKING, Hashable, ) import numpy as np from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( is_extension_array_dtype, is_list_like, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.missing import notna import pandas.core.algorithms as algos from pandas.core.arrays import Categorical import pandas.core.common as com from pandas.core.indexes.api import ( Index, MultiIndex, ) from pandas.core.reshape.concat import concat from pandas.core.reshape.util import tile_compat from pandas.core.shared_docs import _shared_docs from pandas.core.tools.numeric import to_numeric def concat_compat(to_concat, axis: AxisInt = 0, ea_compat_axis: bool = False): """ provide concatenation of an array of arrays each of which is a single 'normalized' dtypes (in that for example, if it's object, then it is a non-datetimelike and provide a combined dtype for the resulting array that preserves the overall dtype if possible) Parameters ---------- to_concat : array of arrays axis : axis to provide concatenation ea_compat_axis : bool, default False For ExtensionArray compat, behave as if axis == 1 when determining whether to drop empty arrays. Returns ------- a single array, preserving the combined dtypes """ # filter empty arrays # 1-d dtypes always are included here def is_nonempty(x) -> bool: if x.ndim <= axis: return True return x.shape[axis] > 0 # If all arrays are empty, there's nothing to convert, just short-cut to # the concatenation, #3121. # # Creating an empty array directly is tempting, but the winnings would be # marginal given that it would still require shape & dtype calculation and # np.concatenate which has them both implemented is compiled. non_empties = [x for x in to_concat if is_nonempty(x)] if non_empties and axis == 0 and not ea_compat_axis: # ea_compat_axis see GH#39574 to_concat = non_empties dtypes = {obj.dtype for obj in to_concat} kinds = {obj.dtype.kind for obj in to_concat} contains_datetime = any( isinstance(dtype, (np.dtype, DatetimeTZDtype)) and dtype.kind in ["m", "M"] for dtype in dtypes ) or any(isinstance(obj, ABCExtensionArray) and obj.ndim > 1 for obj in to_concat) all_empty = not len(non_empties) single_dtype = len({x.dtype for x in to_concat}) == 1 any_ea = any(isinstance(x.dtype, ExtensionDtype) for x in to_concat) if contains_datetime: return _concat_datetime(to_concat, axis=axis) if any_ea: # we ignore axis here, as internally concatting with EAs is always # for axis=0 if not single_dtype: target_dtype = find_common_type([x.dtype for x in to_concat]) target_dtype = common_dtype_categorical_compat(to_concat, target_dtype) to_concat = [ astype_array(arr, target_dtype, copy=False) for arr in to_concat ] if isinstance(to_concat[0], ABCExtensionArray): # TODO: what about EA-backed Index? 
cls = type(to_concat[0]) return cls._concat_same_type(to_concat) else: return np.concatenate(to_concat) elif all_empty: # we have all empties, but may need to coerce the result dtype to # object if we have non-numeric type operands (numpy would otherwise # cast this to float) if len(kinds) != 1: if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}): # let numpy coerce pass else: # coerce to object to_concat = [x.astype("object") for x in to_concat] kinds = {"o"} result = np.concatenate(to_concat, axis=axis) if "b" in kinds and result.dtype.kind in ["i", "u", "f"]: # GH#39817 cast to object instead of casting bools to numeric result = result.astype(object, copy=False) return result def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res The provided code snippet includes necessary dependencies for implementing the `lreshape` function. Write a Python function `def lreshape(data: DataFrame, groups, dropna: bool = True) -> DataFrame` to solve the following problem: Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. Accepts a dictionary, ``groups``, in which each key is a new column name and each value is a list of old column names that will be "melted" under the new column name as part of the reshape. Parameters ---------- data : DataFrame The wide-format DataFrame. groups : dict {new_name : list_of_columns}. 
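One non-obvious branch in `concat_compat` above is the GH#39817 cast: mixing the bool kind with numeric kinds yields an object result rather than letting numpy upcast the bools. A small check; note that `concat_compat` lives in private internals, so this import is for illustration only:

import numpy as np

# Private internals; imported here purely to illustrate the branch above.
from pandas.core.dtypes.concat import concat_compat

bools = np.array([True, False])
ints = np.array([1, 2])

# numpy alone would upcast the bools and return an integer array; the
# `"b" in kinds` check casts the result to object instead (GH#39817).
out = concat_compat([bools, ints])
print(out.dtype)  # object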
dropna : bool, default True Do not include columns whose entries are all NaN. Returns ------- DataFrame Reshaped DataFrame. See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 Here is the function: def lreshape(data: DataFrame, groups, dropna: bool = True) -> DataFrame: """ Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. Accepts a dictionary, ``groups``, in which each key is a new column name and each value is a list of old column names that will be "melted" under the new column name as part of the reshape. Parameters ---------- data : DataFrame The wide-format DataFrame. groups : dict {new_name : list_of_columns}. dropna : bool, default True Do not include columns whose entries are all NaN. Returns ------- DataFrame Reshaped DataFrame. See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 """ if isinstance(groups, dict): keys = list(groups.keys()) values = list(groups.values()) else: keys, values = zip(*groups) all_cols = list(set.union(*(set(x) for x in values))) id_cols = list(data.columns.difference(all_cols)) K = len(values[0]) for seq in values: if len(seq) != K: raise ValueError("All column lists must be same length") mdata = {} pivot_cols = [] for target, names in zip(keys, values): to_concat = [data[col]._values for col in names] mdata[target] = concat_compat(to_concat) pivot_cols.append(target) for col in id_cols: mdata[col] = np.tile(data[col]._values, K) if dropna: mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): mdata = {k: v[mask] for k, v in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols)
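Mechanically, `lreshape` pairs two primitives: `concat_compat` strings each group's columns end to end, while `np.tile` repeats every id column K times (K = columns per group) so the rows stay aligned. A toy sketch mirroring the docstring example:

import numpy as np

# One group ("year") with K=2 columns, one id column ("team"):
year = np.concatenate([[2007, 2007], [2008, 2008]])  # group columns glued end to end
hr = np.concatenate([[514, 573], [545, 526]])
team = np.tile(np.array(["Red Sox", "Yankees"], dtype=object), 2)  # id tiled K times

print(team)  # ['Red Sox' 'Yankees' 'Red Sox' 'Yankees']
print(year)  # [2007 2007 2008 2008]
print(hr)    # [514 573 545 526]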
Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. Accepts a dictionary, ``groups``, in which each key is a new column name and each value is a list of old column names that will be "melted" under the new column name as part of the reshape. Parameters ---------- data : DataFrame The wide-format DataFrame. groups : dict {new_name : list_of_columns}. dropna : bool, default True Do not include columns whose entries are all NaN. Returns ------- DataFrame Reshaped DataFrame. See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526
173,147
from __future__ import annotations import re from typing import ( TYPE_CHECKING, Hashable, ) import numpy as np from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( is_extension_array_dtype, is_list_like, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.missing import notna import pandas.core.algorithms as algos from pandas.core.arrays import Categorical import pandas.core.common as com from pandas.core.indexes.api import ( Index, MultiIndex, ) from pandas.core.reshape.concat import concat from pandas.core.reshape.util import tile_compat from pandas.core.shared_docs import _shared_docs from pandas.core.tools.numeric import to_numeric def melt( frame: DataFrame, id_vars=None, value_vars=None, var_name=None, value_name: Hashable = "value", col_level=None, ignore_index: bool = True, ) -> DataFrame: # If multiindex, gather names of columns on all level for checking presence # of `id_vars` and `value_vars` if isinstance(frame.columns, MultiIndex): cols = [x for c in frame.columns for x in c] else: cols = list(frame.columns) if value_name in frame.columns: raise ValueError( f"value_name ({value_name}) cannot match an element in " "the DataFrame columns." ) if id_vars is not None: if not is_list_like(id_vars): id_vars = [id_vars] elif isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list): raise ValueError( "id_vars must be a list of tuples when columns are a MultiIndex" ) else: # Check that `id_vars` are in frame id_vars = list(id_vars) missing = Index(com.flatten(id_vars)).difference(cols) if not missing.empty: raise KeyError( "The following 'id_vars' are not present " f"in the DataFrame: {list(missing)}" ) else: id_vars = [] if value_vars is not None: if not is_list_like(value_vars): value_vars = [value_vars] elif isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list): raise ValueError( "value_vars must be a list of tuples when columns are a MultiIndex" ) else: value_vars = list(value_vars) # Check that `value_vars` are in frame missing = Index(com.flatten(value_vars)).difference(cols) if not missing.empty: raise KeyError( "The following 'value_vars' are not present in " f"the DataFrame: {list(missing)}" ) if col_level is not None: idx = frame.columns.get_level_values(col_level).get_indexer( id_vars + value_vars ) else: idx = algos.unique(frame.columns.get_indexer_for(id_vars + value_vars)) frame = frame.iloc[:, idx] else: frame = frame.copy() if col_level is not None: # allow list or other? # frame is a copy frame.columns = frame.columns.get_level_values(col_level) if var_name is None: if isinstance(frame.columns, MultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: var_name = [f"variable_{i}" for i in range(len(frame.columns.names))] else: var_name = [ frame.columns.name if frame.columns.name is not None else "variable" ] if isinstance(var_name, str): var_name = [var_name] N, K = frame.shape K -= len(id_vars) mdata: dict[Hashable, AnyArrayLike] = {} for col in id_vars: id_data = frame.pop(col) if is_extension_array_dtype(id_data): if K > 0: id_data = concat([id_data] * K, ignore_index=True) else: # We can't concat empty list. 
(GH 46044) id_data = type(id_data)([], name=id_data.name, dtype=id_data.dtype) else: # error: Incompatible types in assignment (expression has type # "ndarray[Any, dtype[Any]]", variable has type "Series") id_data = np.tile(id_data._values, K) # type: ignore[assignment] mdata[col] = id_data mcolumns = id_vars + var_name + [value_name] if frame.shape[1] > 0: mdata[value_name] = concat( [frame.iloc[:, i] for i in range(frame.shape[1])] ).values else: mdata[value_name] = frame._values.ravel("F") for i, col in enumerate(var_name): # asanyarray will keep the columns as an Index mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N) result = frame._constructor(mdata, columns=mcolumns) if not ignore_index: result.index = tile_compat(frame.index, K) return result def to_numeric( arg, errors: DateTimeErrorChoices = "raise", downcast: Literal["integer", "signed", "unsigned", "float"] | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, ): """ Convert argument to a numeric type. The default return dtype is `float64` or `int64` depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. Please note that precision loss may occur if really large numbers are passed in. Due to the internal limitations of `ndarray`, if numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are passed in, it is very likely they will be converted to float so that they can be stored in an `ndarray`. These warnings apply similarly to `Series` since it internally leverages `ndarray`. Parameters ---------- arg : scalar, list, tuple, 1-d array, or Series Argument to be converted. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaN. - If 'ignore', then invalid parsing will return the input. downcast : str, default None Can be 'integer', 'signed', 'unsigned', or 'float'. If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) As this behaviour is separate from the core conversion to numeric values, any errors raised during the downcasting will be surfaced regardless of the value of the 'errors' input. In addition, downcasting will only occur if the size of the resulting data's dtype is strictly larger than the dtype it is to be cast to, so if none of the dtypes checked satisfy that specification, no downcasting will be performed on the data. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- ret Numeric if parsing succeeded. Return type depends on input. Series if Series, otherwise ndarray. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. 
numpy.ndarray.astype : Cast a numpy array to a specified type. DataFrame.convert_dtypes : Convert dtypes. Examples -------- Take separate series and convert to numeric, coercing when told to >>> s = pd.Series(['1.0', '2', -3]) >>> pd.to_numeric(s) 0 1.0 1 2.0 2 -3.0 dtype: float64 >>> pd.to_numeric(s, downcast='float') 0 1.0 1 2.0 2 -3.0 dtype: float32 >>> pd.to_numeric(s, downcast='signed') 0 1 1 2 2 -3 dtype: int8 >>> s = pd.Series(['apple', '1.0', '2', -3]) >>> pd.to_numeric(s, errors='ignore') 0 apple 1 1.0 2 2 3 -3 dtype: object >>> pd.to_numeric(s, errors='coerce') 0 NaN 1 1.0 2 2.0 3 -3.0 dtype: float64 Downcasting of nullable integer and floating dtypes is supported: >>> s = pd.Series([1, 2, 3], dtype="Int64") >>> pd.to_numeric(s, downcast="integer") 0 1 1 2 2 3 dtype: Int8 >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64") >>> pd.to_numeric(s, downcast="float") 0 1.0 1 2.1 2 3.0 dtype: Float32 """ if downcast not in (None, "integer", "signed", "unsigned", "float"): raise ValueError("invalid downcasting method provided") if errors not in ("ignore", "raise", "coerce"): raise ValueError("invalid error value specified") check_dtype_backend(dtype_backend) is_series = False is_index = False is_scalars = False if isinstance(arg, ABCSeries): is_series = True values = arg.values elif isinstance(arg, ABCIndex): is_index = True if needs_i8_conversion(arg.dtype): values = arg.view("i8") else: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype="O") elif is_scalar(arg): if is_decimal(arg): return float(arg) if is_number(arg): return arg is_scalars = True values = np.array([arg], dtype="O") elif getattr(arg, "ndim", 1) > 1: raise TypeError("arg must be a list, tuple, 1-d array, or Series") else: values = arg orig_values = values # GH33013: for IntegerArray & FloatingArray extract non-null values for casting # save mask to reconstruct the full array after casting mask: npt.NDArray[np.bool_] | None = None if isinstance(values, BaseMaskedArray): mask = values._mask values = values._data[~mask] values_dtype = getattr(values, "dtype", None) if isinstance(values_dtype, pd.ArrowDtype): mask = values.isna() values = values.dropna().to_numpy() new_mask: np.ndarray | None = None if is_numeric_dtype(values_dtype): pass elif is_datetime_or_timedelta_dtype(values_dtype): values = values.view(np.int64) else: values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload] # noqa values, set(), coerce_numeric=coerce_numeric, convert_to_masked_nullable=dtype_backend is not lib.no_default or isinstance(values_dtype, StringDtype), ) except (ValueError, TypeError): if errors == "raise": raise values = orig_values if new_mask is not None: # Remove unnecessary values, is expected later anyway and enables # downcasting values = values[~new_mask] elif ( dtype_backend is not lib.no_default and new_mask is None or isinstance(values_dtype, StringDtype) ): new_mask = np.zeros(values.shape, dtype=np.bool_) # attempt downcast only if the data has been successfully converted # to a numerical dtype and if a downcast method has been specified if downcast is not None and is_numeric_dtype(values.dtype): typecodes: str | None = None if downcast in ("integer", "signed"): typecodes = np.typecodes["Integer"] elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0): typecodes = np.typecodes["UnsignedInteger"] elif downcast == "float": typecodes = np.typecodes["Float"] # pandas support 
goes only to np.float32, # as float dtypes smaller than that are # extremely rare and not well supported float_32_char = np.dtype(np.float32).char float_32_ind = typecodes.index(float_32_char) typecodes = typecodes[float_32_ind:] if typecodes is not None: # from smallest to largest for typecode in typecodes: dtype = np.dtype(typecode) if dtype.itemsize <= values.dtype.itemsize: values = maybe_downcast_numeric(values, dtype) # successful conversion if values.dtype == dtype: break # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct # masked array if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype): if mask is None: mask = new_mask else: mask = mask.copy() assert isinstance(mask, np.ndarray) data = np.zeros(mask.shape, dtype=values.dtype) data[~mask] = values from pandas.core.arrays import ( ArrowExtensionArray, BooleanArray, FloatingArray, IntegerArray, ) klass: type[IntegerArray] | type[BooleanArray] | type[FloatingArray] if is_integer_dtype(data.dtype): klass = IntegerArray elif is_bool_dtype(data.dtype): klass = BooleanArray else: klass = FloatingArray values = klass(data, mask) if dtype_backend == "pyarrow" or isinstance(values_dtype, pd.ArrowDtype): values = ArrowExtensionArray(values.__arrow_array__()) if is_series: return arg._constructor(values, index=arg.index, name=arg.name) elif is_index: # because we want to coerce to numeric if possible, # do not use _shallow_copy return pd.Index(values, name=arg.name) elif is_scalars: return values[0] else: return values The provided code snippet includes necessary dependencies for implementing the `wide_to_long` function. Write a Python function `def wide_to_long( df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+" ) -> DataFrame` to solve the following problem: r""" Unpivot a DataFrame from wide to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame. stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s). j : str The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you can strip the hyphen by specifying `sep='-'`. suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'`. When all suffixes are numeric, they are cast to int64/float64. Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j). 
See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typical case. Examples -------- >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... }) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multiple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... # doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.unstack() >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3), ... 'A(weekly)-2011': np.random.rand(3), ... 'B(weekly)-2010': np.random.rand(3), ... 'B(weekly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id 0 0.548814 0.544883 0.437587 0.383442 0 0 1 0.715189 0.423655 0.891773 0.791725 1 1 2 0.602763 0.645894 0.963663 0.528895 1 2 >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(weekly) B(weekly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != []]) ... ) >>> list(stubnames) ['A(weekly)', 'B(weekly)'] All of the above examples have integers as suffixes. It is possible to have non-integers as suffixes. >>> df = pd.DataFrame({ ... 
'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df famid birth ht_one ht_two 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', ... sep='_', suffix=r'\w+') >>> l ... # doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 one 2.8 two 3.4 2 one 2.9 two 3.8 3 one 2.2 two 2.9 2 1 one 2.0 two 3.2 2 one 1.8 two 2.8 3 one 1.9 two 2.4 3 1 one 2.2 two 3.3 2 one 2.3 two 3.4 3 one 2.1 two 2.9 Here is the function: def wide_to_long( df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+" ) -> DataFrame: r""" Unpivot a DataFrame from wide to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame. stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s). j : str The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you can strip the hyphen by specifying `sep='-'`. suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'`. When all suffixes are numeric, they are cast to int64/float64. Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j). See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typical case. Examples -------- >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... 
}) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multiple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... # doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.unstack() >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3), ... 'A(weekly)-2011': np.random.rand(3), ... 'B(weekly)-2010': np.random.rand(3), ... 'B(weekly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id 0 0.548814 0.544883 0.437587 0.383442 0 0 1 0.715189 0.423655 0.891773 0.791725 1 1 2 0.602763 0.645894 0.963663 0.528895 1 2 >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(weekly) B(weekly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != []]) ... ) >>> list(stubnames) ['A(weekly)', 'B(weekly)'] All of the above examples have integers as suffixes. It is possible to have non-integers as suffixes. >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df famid birth ht_one ht_two 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', ... sep='_', suffix=r'\w+') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 one 2.8 two 3.4 2 one 2.9 two 3.8 3 one 2.2 two 2.9 2 1 one 2.0 two 3.2 2 one 1.8 two 2.8 3 one 1.9 two 2.4 3 1 one 2.2 two 3.3 2 one 2.3 two 3.4 3 one 2.1 two 2.9 """ def get_var_names(df, stub: str, sep: str, suffix: str) -> list[str]: regex = rf"^{re.escape(stub)}{re.escape(sep)}{suffix}$" pattern = re.compile(regex) return [col for col in df.columns if pattern.match(col)] def melt_stub(df, stub: str, i, j, value_vars, sep: str): newdf = melt( df, id_vars=i, value_vars=value_vars, value_name=stub.rstrip(sep), var_name=j, ) newdf[j] = Categorical(newdf[j]) newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "", regex=True) # GH17627 Cast numerics suffixes to int/float newdf[j] = to_numeric(newdf[j], errors="ignore") return newdf.set_index(i + [j]) if not is_list_like(stubnames): stubnames = [stubnames] else: stubnames = list(stubnames) if any(col in stubnames for col in df.columns): raise ValueError("stubname can't be identical to a column name") if not is_list_like(i): i = [i] else: i = list(i) if df[i].duplicated().any(): raise ValueError("the id variables need to uniquely identify each row") value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames] value_vars_flattened = [e for sublist in value_vars for e in sublist] id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) _melted = [melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)] melted = _melted[0].join(_melted[1:], how="outer") if len(i) == 1: new = df[id_vars].set_index(i).join(melted) return new new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) return new
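The column matching inside `get_var_names` is just an anchored regex built from `stub`, `sep`, and `suffix`. A quick illustration with hypothetical column names:

import re

# How a stub's columns are matched: stub + sep + suffix, anchored at both ends.
stub, sep, suffix = "ht", "_", r"\w+"
pattern = re.compile(rf"^{re.escape(stub)}{re.escape(sep)}{suffix}$")

cols = ["ht_one", "ht_two", "height", "famid"]
print([c for c in cols if pattern.match(c)])  # ['ht_one', 'ht_two']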
r""" Unpivot a DataFrame from wide to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame. stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s). j : str The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you can strip the hyphen by specifying `sep='-'`. suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'`. When all suffixes are numeric, they are cast to int64/float64. Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j). See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typical case. Examples -------- >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... }) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multiple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.unstack() >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3), ... 'A(weekly)-2011': np.random.rand(3), ... 'B(weekly)-2010': np.random.rand(3), ... 'B(weekly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id 0 0.548814 0.544883 0.437587 0.383442 0 0 1 0.715189 0.423655 0.891773 0.791725 1 1 2 0.602763 0.645894 0.963663 0.528895 1 2 >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(weekly) B(weekly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != []]) ... ) >>> list(stubnames) ['A(weekly)', 'B(weekly)'] All of the above examples have integers as suffixes. It is possible to have non-integers as suffixes. >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df famid birth ht_one ht_two 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', ... sep='_', suffix=r'\w+') >>> l ... # doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 one 2.8 two 3.4 2 one 2.9 two 3.8 3 one 2.2 two 2.9 2 1 one 2.0 two 3.2 2 one 1.8 two 2.8 3 one 1.9 two 2.4 3 1 one 2.2 two 3.3 2 one 2.3 two 3.4 3 one 2.1 two 2.9
173,148
from __future__ import annotations import itertools from typing import ( TYPE_CHECKING, cast, ) import warnings import numpy as np import pandas._libs.reshape as libreshape from pandas._typing import npt from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( ensure_platform_int, is_1d_only_ea_dtype, is_extension_array_dtype, is_integer, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import notna import pandas.core.algorithms as algos from pandas.core.arrays.categorical import factorize_from_iterable from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.frame import DataFrame from pandas.core.indexes.api import ( Index, MultiIndex, ) from pandas.core.series import Series from pandas.core.sorting import ( compress_group_index, decons_obs_group_ids, get_compressed_ids, get_group_index, get_group_index_sorter, ) def stack(frame: DataFrame, level=-1, dropna: bool = True): """ Convert DataFrame to Series with multi-level Index. Columns become the second level of the resulting hierarchical index Returns ------- stacked : Series or DataFrame """ def factorize(index): if index.is_unique: return index, np.arange(len(index)) codes, categories = factorize_from_iterable(index) return categories, codes N, K = frame.shape # Will also convert negative level numbers and check if out of bounds. level_num = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): return _stack_multi_columns(frame, level_num=level_num, dropna=dropna) elif isinstance(frame.index, MultiIndex): new_levels = list(frame.index.levels) new_codes = [lab.repeat(K) for lab in frame.index.codes] clev, clab = factorize(frame.columns) new_levels.append(clev) new_codes.append(np.tile(clab, N).ravel()) new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False ) else: levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns))) codes = ilab.repeat(K), np.tile(clab, N).ravel() new_index = MultiIndex( levels=levels, codes=codes, names=[frame.index.name, frame.columns.name], verify_integrity=False, ) if not frame.empty and frame._is_homogeneous_type: # For homogeneous EAs, frame._values will coerce to object. So # we concatenate instead. 
dtypes = list(frame.dtypes._values) dtype = dtypes[0] if is_extension_array_dtype(dtype): arr = dtype.construct_array_type() new_values = arr._concat_same_type( [col._values for _, col in frame.items()] ) new_values = _reorder_for_extension_array_stack(new_values, N, K) else: # homogeneous, non-EA new_values = frame._values.ravel() else: # non-homogeneous new_values = frame._values.ravel() if dropna: mask = notna(new_values) new_values = new_values[mask] new_index = new_index[mask] return frame._constructor_sliced(new_values, index=new_index) def stack_multiple(frame, level, dropna: bool = True): # If all passed levels match up to column names, no # ambiguity about what to do if all(lev in frame.columns.names for lev in level): result = frame for lev in level: result = stack(result, lev, dropna=dropna) # Otherwise, level numbers may change as each successive level is stacked elif all(isinstance(lev, int) for lev in level): # As each stack is done, the level numbers decrease, so we need # to account for that when level is a sequence of ints result = frame # _get_level_number() checks level numbers are in range and converts # negative numbers to positive level = [frame.columns._get_level_number(lev) for lev in level] while level: lev = level.pop(0) result = stack(result, lev, dropna=dropna) # Decrement all level numbers greater than current, as these # have now shifted down by one level = [v if v <= lev else v - 1 for v in level] else: raise ValueError( "level should contain all level names or all level " "numbers, not a mixture of the two." ) return result
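The level-renumbering loop in `stack_multiple` is easy to misread: each completed stack removes one column level, so the remaining level numbers above it shift down by one. A standalone sketch of just that bookkeeping (the `stack` call itself is elided):

# Hypothetical: stack column levels 0 and 2 of some MultiIndex-columned frame.
level = [0, 2]
while level:
    lev = level.pop(0)
    # ... stack(result, lev, dropna=dropna) would run here ...
    level = [v if v <= lev else v - 1 for v in level]
    print("stacked", lev, "-> remaining", level)
# stacked 0 -> remaining [1]   (old level 2 is now level 1)
# stacked 1 -> remaining []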
173,149
from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas.compat._optional import import_optional_dependency TYPE_CHECKING = True def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module The provided code snippet includes necessary dependencies for implementing the `generate_online_numba_ewma_func` function. Write a Python function `def generate_online_numba_ewma_func( nopython: bool, nogil: bool, parallel: bool, )` to solve the following problem: Generate a numba jitted groupby ewma function specified by values from engine_kwargs. Parameters ---------- nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function Here is the function: def generate_online_numba_ewma_func( nopython: bool, nogil: bool, parallel: bool, ): """ Generate a numba jitted groupby ewma function specified by values from engine_kwargs. 
Parameters ---------- nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def online_ewma( values: np.ndarray, deltas: np.ndarray, minimum_periods: int, old_wt_factor: float, new_wt: float, old_wt: np.ndarray, adjust: bool, ignore_na: bool, ): """ Compute online exponentially weighted mean per column over 2D values. Takes the first observation as is, then computes the subsequent exponentially weighted mean accounting minimum periods. """ result = np.empty(values.shape) weighted_avg = values[0] nobs = (~np.isnan(weighted_avg)).astype(np.int64) result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) for i in range(1, len(values)): cur = values[i] is_observations = ~np.isnan(cur) nobs += is_observations.astype(np.int64) for j in numba.prange(len(cur)): if not np.isnan(weighted_avg[j]): if is_observations[j] or not ignore_na: # note that len(deltas) = len(vals) - 1 and deltas[i] is to be # used in conjunction with vals[i+1] old_wt[j] *= old_wt_factor ** deltas[j - 1] if is_observations[j]: # avoid numerical errors on constant series if weighted_avg[j] != cur[j]: weighted_avg[j] = ( (old_wt[j] * weighted_avg[j]) + (new_wt * cur[j]) ) / (old_wt[j] + new_wt) if adjust: old_wt[j] += new_wt else: old_wt[j] = 1.0 elif is_observations[j]: weighted_avg[j] = cur[j] result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) return result, old_wt return online_ewma
Generate a numba jitted groupby ewma function specified by values from engine_kwargs. Parameters ---------- nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function
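A minimal usage sketch for the generated kernel, assuming numba is installed. The import path matches the pandas-internal imports shown later in this document; the input array, alpha, and the remaining parameter values are purely illustrative.

import numpy as np
from pandas.core.window.online import generate_online_numba_ewma_func

alpha = 0.5
values = np.array([[1.0, 10.0],
                   [2.0, 20.0],
                   [np.nan, 30.0]])      # one exponentially weighted mean per column
deltas = np.ones(len(values) - 1)        # uniform spacing between observations
old_wt = np.ones(values.shape[1])        # running weights, updated in place

online_ewma = generate_online_numba_ewma_func(
    nopython=True, nogil=False, parallel=False
)
result, old_wt = online_ewma(
    values, deltas,
    1,            # minimum_periods
    1.0 - alpha,  # old_wt_factor
    alpha,        # new_wt
    old_wt,
    True,         # adjust
    False,        # ignore_na
)
print(result)     # running EWM mean after each row; NaN where nobs < minimum_periods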
173,150
from __future__ import annotations from textwrap import dedent from pandas.core.shared_docs import _shared_docs The provided code snippet includes necessary dependencies for implementing the `create_section_header` function. Write a Python function `def create_section_header(header: str) -> str` to solve the following problem: Create numpydoc section header Here is the function: def create_section_header(header: str) -> str: """Create numpydoc section header""" return f"{header}\n{'-' * len(header)}\n"
Create numpydoc section header
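For concreteness, the one-liner renders a numpydoc section title with its underline; the function body below is copied verbatim from the row above.

def create_section_header(header: str) -> str:
    """Create numpydoc section header"""
    return f"{header}\n{'-' * len(header)}\n"

print(create_section_header("Parameters"), end="")
# Parameters
# ----------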
173,151
from __future__ import annotations from textwrap import dedent from pandas.core.shared_docs import _shared_docs def dedent(text: str) -> str: ... def window_agg_numba_parameters(version: str = "1.3") -> str: return ( dedent( """ engine : str, default None * ``'cython'`` : Runs the operation through C-extensions from cython. * ``'numba'`` : Runs the operation through JIT compiled code from numba. * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` .. versionadded:: {version}.0 engine_kwargs : dict, default None * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` .. versionadded:: {version}.0\n """ ) .replace("\n", "", 1) .replace("{version}", version) )
null
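An illustrative call showing how the rendered fragment is parameterized; the wording comes from the template above, and the version argument only fills the ".. versionadded::" directive.

params_doc = window_agg_numba_parameters("1.4")
print(params_doc.splitlines()[0].strip())    # engine : str, default None
assert ".. versionadded:: 1.4.0" in params_doc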
173,152
from __future__ import annotations import datetime from functools import partial from textwrap import dedent from typing import TYPE_CHECKING import numpy as np from pandas._libs.tslibs import Timedelta import pandas._libs.window.aggregations as window_aggregations from pandas._typing import ( Axis, TimedeltaConvertibleTypes, ) from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_datetime64_ns_dtype, is_numeric_dtype, ) from pandas.core.dtypes.missing import isna from pandas.core import common from pandas.core.indexers.objects import ( BaseIndexer, ExponentialMovingWindowIndexer, GroupbyIndexer, ) from pandas.core.util.numba_ import ( get_jit_arguments, maybe_use_numba, ) from pandas.core.window.common import zsqrt from pandas.core.window.doc import ( _shared_docs, create_section_header, kwargs_numeric_only, numba_notes, template_header, template_returns, template_see_also, window_agg_numba_parameters, ) from pandas.core.window.numba_ import ( generate_numba_ewm_func, generate_numba_ewm_table_func, ) from pandas.core.window.online import ( EWMMeanState, generate_online_numba_ewma_func, ) from pandas.core.window.rolling import ( BaseWindow, BaseWindowGroupby, ) def get_center_of_mass( comass: float | None, span: float | None, halflife: float | None, alpha: float | None, ) -> float: valid_count = common.count_not_none(comass, span, halflife, alpha) if valid_count > 1: raise ValueError("comass, span, halflife, and alpha are mutually exclusive") # Convert to center of mass; domain checks ensure 0 < alpha <= 1 if comass is not None: if comass < 0: raise ValueError("comass must satisfy: comass >= 0") elif span is not None: if span < 1: raise ValueError("span must satisfy: span >= 1") comass = (span - 1) / 2 elif halflife is not None: if halflife <= 0: raise ValueError("halflife must satisfy: halflife > 0") decay = 1 - np.exp(np.log(0.5) / halflife) comass = 1 / decay - 1 elif alpha is not None: if alpha <= 0 or alpha > 1: raise ValueError("alpha must satisfy: 0 < alpha <= 1") comass = (1 - alpha) / alpha else: raise ValueError("Must pass one of comass, span, halflife, or alpha") return float(comass)
null
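The validation above reduces to closed-form conversions onto a common center-of-mass scale; a few worked calls with hand-computed values (illustrative inputs, signature is (comass, span, halflife, alpha)).

get_center_of_mass(9.5, None, None, None)   # comass passes through -> 9.5
get_center_of_mass(None, 20, None, None)    # span: (20 - 1) / 2 -> 9.5
get_center_of_mass(None, None, None, 0.5)   # alpha: (1 - 0.5) / 0.5 -> 1.0
get_center_of_mass(None, None, 1.0, None)   # halflife: 1 / (1 - exp(ln(0.5) / 1)) - 1 -> 1.0 (up to rounding)
# Supplying more than one of the four raises ValueError.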
173,153
from __future__ import annotations import datetime from functools import partial from textwrap import dedent from typing import TYPE_CHECKING import numpy as np from pandas._libs.tslibs import Timedelta import pandas._libs.window.aggregations as window_aggregations from pandas._typing import ( Axis, TimedeltaConvertibleTypes, ) from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_datetime64_ns_dtype, is_numeric_dtype, ) from pandas.core.dtypes.missing import isna from pandas.core import common from pandas.core.indexers.objects import ( BaseIndexer, ExponentialMovingWindowIndexer, GroupbyIndexer, ) from pandas.core.util.numba_ import ( get_jit_arguments, maybe_use_numba, ) from pandas.core.window.common import zsqrt from pandas.core.window.doc import ( _shared_docs, create_section_header, kwargs_numeric_only, numba_notes, template_header, template_returns, template_see_also, window_agg_numba_parameters, ) from pandas.core.window.numba_ import ( generate_numba_ewm_func, generate_numba_ewm_table_func, ) from pandas.core.window.online import ( EWMMeanState, generate_online_numba_ewma_func, ) from pandas.core.window.rolling import ( BaseWindow, BaseWindowGroupby, ) TimedeltaConvertibleTypes = Union[ "Timedelta", timedelta, np.timedelta64, np.int64, float, str ] class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ) -> None: # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). 
Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. """ if self._attrs is None: self._attrs = {} return self._attrs def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. 
""" raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. 
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit in case of having a single block, otherwise a # copy is made, we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. 
Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... 
def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... 
names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. 
Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a 
{type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. 
Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. 
axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. 
""" return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional 
recursion # e.g. say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache: dict[Hashable, Series] = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods klass="object", storage_options=_shared_docs["storage_options"], storage_options_versionadded="1.2.0", ) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True, index: bool_t = True, index_label: IndexLabel = None, startrow: int = 0, startcol: int = 0, engine: str | None = None, merge_cells: bool_t = True, inf_rep: str = "inf", freeze_panes: tuple[int, int] | None = None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. 
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string. 
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. 
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
None is returned if the callable passed into ``method`` does not return an integer number of rows. The number of returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> from sqlalchemy import text >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation. >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM integers")).fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. 
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
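    # Illustrative sketch (not upstream code): the two overload stubs above
    # encode that the return type follows from ``buf``:
    #   tex = df.to_latex()          # buf=None      -> returns str
    #   df.to_latex("table.tex")     # buf=path-like -> writes file, returns None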
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
relabel_index=relabel_index_, format={"formatter": formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_, ) def _to_latex_via_styler( self, buf=None, *, hide: dict | list[dict] | None = None, relabel_index: dict | list[dict] | None = None, format: dict | list[dict] | None = None, format_index: dict | list[dict] | None = None, render_kwargs: dict | None = None, ): """ Render object to a LaTeX tabular, longtable, or nested table. Uses the ``Styler`` implementation with the following, ordered, method chaining: .. code-block:: python styler = Styler(DataFrame) styler.hide(**hide) styler.relabel_index(**relabel_index) styler.format(**format) styler.format_index(**format_index) styler.to_latex(buf=buf, **render_kwargs) Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. hide : dict, list of dict Keyword args to pass to the method call of ``Styler.hide``. If a list will call the method numerous times. relabel_index : dict, list of dict Keyword args to pass to the method of ``Styler.relabel_index``. If a list will call the method numerous times. format : dict, list of dict Keyword args to pass to the method call of ``Styler.format``. If a list will call the method numerous times. format_index : dict, list of dict Keyword args to pass to the method call of ``Styler.format_index``. If a list will call the method numerous times. render_kwargs : dict Keyword args to pass to the method call of ``Styler.to_latex``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. """ from pandas.io.formats.style import Styler self = cast("DataFrame", self) styler = Styler(self, uuid="") for kw_name in ["hide", "relabel_index", "format", "format_index"]: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) # bold_rows is not a direct kwarg of Styler.to_latex render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop("bold_rows"): styler.applymap_index(lambda v: "textbf:--rwrap;") return styler.to_latex(buf=buf, **render_kwargs) def to_csv( self, path_or_buf: None = ..., sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> str: ... def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> None: ... 
    @doc(
storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | Callable | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, Callable, default None Format string for floating point numbers. If a Callable is given, it takes precedence over other numeric formatting parameters, like decimal. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str, default 'w' Python write mode. The available write modes are the same as :py:func:`open`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. 
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
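        inplace : bool, default False
            Whether the update originates from an inplace modification;
            forwarded to the parent's cache-update logic (description
            inferred from usage).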
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError("list keys are not supported in xs, pass a tuple instead") if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_mgr, name=self.index[loc] ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. 
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., ) -> None: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., ) -> NDFrameT: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., ) -> NDFrameT | None: ... def drop( self: NDFrameT, labels: IndexLabel = None, *, axis: Axis = 0, index: IndexLabel = None, columns: IndexLabel = None, level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {"index": index} if self.ndim == 2: axes["columns"] = columns else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: IgnoreRaise = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
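    # Minimal usage sketch (illustrative only): these overloads encode that
    # ``inplace=True`` returns ``None`` while the default ``inplace=False``
    # returns a new, sorted object of the caller's type.
    #
    #   df = pd.DataFrame({"a": [2, 1]}, index=[1, 0])
    #   out = df.sort_index()                        # returns a sorted copy
    #   assert df.sort_index(inplace=True) is None   # mutates df, returns None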
def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT | None: ... def sort_index( self: NDFrameT, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool_t = True, ignore_index: bool_t = False, key: IndexKeyFunc = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer( target, level, ascending, kind, na_position, sort_remaining, key ) if indexer is None: if inplace: result = self else: result = self.copy(deep=None) if ignore_index: result.index = default_index(len(self)) if inplace: return None else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 new_data.set_axis(axis, default_index(len(indexer))) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") klass=_shared_doc_kwargs["klass"], optional_reindex="", ) def reindex( self: NDFrameT, labels=None, index=None, columns=None, axis: Axis | None = None, method: str | None = None, copy: bool_t | None = None, level: Level | None = None, fill_value: Scalar | None = np.nan, limit: int | None = None, tolerance=None, ) -> NDFrameT: """ Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- {optional_reindex} method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: Propagate last valid observation forward to next valid. * backfill / bfill: Use next valid observation to fill gap. * nearest: Use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- {klass} with changed index. 
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
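            # ``reindex_indexer`` builds a new block manager along ``baxis``
            # from ``indexer``; positions with no corresponding source label
            # are filled with ``fill_value``.  Axes whose target index is
            # None were skipped above, so this only runs for axes that are
            # actually being reindexed.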
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
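        # Fallback ordering mirrors __getattr__: internal names and _metadata
        # entries are set as plain attributes; otherwise, if ``name`` is an
        # existing label on the info axis, assignment is routed through
        # ``self[name] = value`` so that ``df.existing_col = values`` updates
        # the column instead of shadowing it with an instance attribute.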
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f() -> None: self._mgr = self._mgr.consolidate() self._protect_consolidate(f) def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value) or value is lib.no_default: return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True def _get_numeric_data(self: NDFrameT) -> NDFrameT: return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def values(self): raise AbstractMethodError(self) def _values(self) -> ArrayLike: """internal implementation""" raise AbstractMethodError(self) def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Parameters ---------- copy : bool, default True Whether to make a copy for non-object or non-inferrable columns or Series. Returns ------- same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ new_mgr = self._mgr.convert(copy=copy) return self._constructor(new_mgr).__finalize__(self, method="infer_objects") def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, dtype_backend: DtypeBackend = "numpy_nullable", ) -> NDFrameT: """ Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. .. versionadded:: 1.2.0 dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable" Which dtype_backend to use, e.g. whether a DataFrame should use nullable dtypes for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- Series or DataFrame Copy of input object with new dtype. See Also -------- infer_objects : Infer dtypes of objects. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. 
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy(deep=None) from pandas import Series value = Series(value) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) if using_copy_on_write(): result = self.copy(deep=None) else: result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( downcast if not is_dict else downcast.get(k) # type: ignore[union-attr] ) res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) if not inplace: result[k] = res_k else: # We can write into our existing column(s) iff dtype # was preserved. if isinstance(res_k, ABCSeries): # i.e. 'k' only shows up once in self.columns if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: # Different dtype -> no way to do inplace. 
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
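
        Gaps are filled from the next valid observation; this is implemented
        as a call to :meth:`fillna` with ``method='bfill'``, so the ``limit``
        and ``downcast`` semantics documented there apply unchanged.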
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        return self.fillna(
            method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
        )

    def backfill(
        self: NDFrameT,
        *,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast: dict | None = None,
    ) -> NDFrameT | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.

        .. deprecated:: 2.0

            {klass}.backfill is deprecated. Use {klass}.bfill instead.

        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        warnings.warn(
            "DataFrame.backfill/Series.backfill is deprecated. Use "
            "DataFrame.bfill/Series.bfill instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[False] = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT:
        ...

    @overload
    def replace(
        self,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[True],
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: bool_t = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT | None:
        ...

    @doc(
        _shared_docs["replace"],
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(
        self: NDFrameT,
        to_replace=None,
        value=lib.no_default,
        *,
        inplace: bool_t = False,
        limit: int | None = None,
        regex: bool_t = False,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
    ) -> NDFrameT | None:
        if not (
            is_scalar(to_replace)
            or is_re_compilable(to_replace)
            or is_list_like(to_replace)
        ):
            raise TypeError(
                "Expecting 'to_replace' to be either a scalar, array-like, "
                "dict or None, got invalid type "
                f"{repr(type(to_replace).__name__)}"
            )

        inplace = validate_bool_kwarg(inplace, "inplace")
        if not is_bool(regex) and to_replace is not None:
            raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")

        if value is lib.no_default or method is not lib.no_default:
            # GH#36984 if the user explicitly passes value=None we want to
            #  respect that. We have the corner case where the user explicitly
            #  passes value=None *and* a method, which we interpret as meaning
            #  they want the (documented) default behavior.
            if method is lib.no_default:
                # TODO: get this to show up as the default in the docs?
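                # (Editorial, illustrative comment: with `value` omitted,
                # e.g. pd.Series([0, 1, 2]).replace(1) fills the replaced
                # slot from the previous valid observation and returns
                # [0, 0, 2], i.e. the documented method="pad" default.)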
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd'))
        >>> df
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  NaN  2.0  NaN   NaN
        2  2.0  3.0  NaN   9.0
        3  NaN  4.0 -4.0  16.0
        >>> df.interpolate(method='linear', limit_direction='forward', axis=0)
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  1.0  2.0 -2.0   5.0
        2  2.0  3.0 -3.0   9.0
        3  2.0  4.0 -4.0  16.0

        Using polynomial interpolation.

        >>> df['d'].interpolate(method='polynomial', order=2)
        0     1.0
        1     4.0
        2     9.0
        3    16.0
        Name: d, dtype: float64
        """
        inplace = validate_bool_kwarg(inplace, "inplace")

        axis = self._get_axis_number(axis)

        fillna_methods = ["ffill", "bfill", "pad", "backfill"]
        should_transpose = axis == 1 and method not in fillna_methods

        obj = self.T if should_transpose else self

        if obj.empty:
            return self.copy()

        if method not in fillna_methods:
            axis = self._info_axis_number

        if isinstance(obj.index, MultiIndex) and method != "linear":
            raise ValueError(
                "Only `method=linear` interpolation is supported on MultiIndexes."
            )

        # Set `limit_direction` depending on `method`
        if limit_direction is None:
            limit_direction = (
                "backward" if method in ("backfill", "bfill") else "forward"
            )
        else:
            if method in ("pad", "ffill") and limit_direction != "forward":
                raise ValueError(
                    f"`limit_direction` must be 'forward' for method `{method}`"
                )
            if method in ("backfill", "bfill") and limit_direction != "backward":
                raise ValueError(
                    f"`limit_direction` must be 'backward' for method `{method}`"
                )

        if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")):
            raise TypeError(
                "Cannot interpolate with all object-dtype columns "
                "in the DataFrame. Try setting at least one "
                "column to a numeric dtype."
            )

        # create/use the index
        if method == "linear":
            # prior default
            index = Index(np.arange(len(obj.index)))
        else:
            index = obj.index
            methods = {"index", "values", "nearest", "time"}
            is_numeric_or_datetime = (
                is_numeric_dtype(index.dtype)
                or is_datetime64_any_dtype(index.dtype)
                or is_timedelta64_dtype(index.dtype)
            )
            if method not in methods and not is_numeric_or_datetime:
                raise ValueError(
                    "Index column must be numeric or datetime type when "
                    f"using {method} method other than linear. "
                    "Try setting a numeric or datetime index column before "
                    "interpolating."
                )

        if isna(index).any():
            raise NotImplementedError(
                "Interpolation with NaNs in the index "
                "has not been implemented. Try filling "
                "those NaNs before interpolating."
            )
        new_data = obj._mgr.interpolate(
            method=method,
            axis=axis,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            inplace=inplace,
            downcast=downcast,
            **kwargs,
        )

        result = self._constructor(new_data)
        if should_transpose:
            result = result.T
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="interpolate")

    # ----------------------------------------------------------------------
    # Timeseries methods

    def asof(self, where, subset=None):
        """
        Return the last row(s) without any NaNs before `where`.

        The last row (for each element in `where`, if list) without any
        NaN is taken.
        In case of a :class:`~pandas.DataFrame`, the last row without NaN
        considering only the subset of columns (if not `None`) is taken.

        If there is no good value, NaN is returned for a Series or
        a Series of NaN values for a DataFrame.

        Parameters
        ----------
        where : date or array-like of dates
            Date(s) before which the last row(s) are returned.
        subset : str or array-like of str, default `None`
            For DataFrame, if not `None`, only use these columns to
            check for NaNs.
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats
        data.loc[missing] = np.nan
        return data if is_list else data.iloc[-1]

    # ----------------------------------------------------------------------
    # Action Methods

    def isna(self: NDFrameT) -> NDFrameT:
        """
        Detect missing values.

        Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or :attr:`numpy.NaN`, get mapped to True
        values.
        Everything else gets mapped to False values. Characters such as empty
        strings ``''`` or :attr:`numpy.inf` are not considered NA values
        (unless you set ``pandas.options.mode.use_inf_as_na = True``).

        Returns
        -------
        {klass}
            Mask of bool values for each element in {klass} that
            indicates whether an element is an NA value.

        See Also
        --------
        {klass}.isnull : Alias of isna.
        {klass}.notna : Boolean inverse of isna.
        {klass}.dropna : Omit axes labels with missing values.
        isna : Top-level isna.

        Examples
        --------
        Show which entries in a DataFrame are NA.

        >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
        ...                        born=[pd.NaT, pd.Timestamp('1939-05-27'),
        ...                              pd.Timestamp('1940-04-25')],
        ...                        name=['Alfred', 'Batman', ''],
        ...                        toy=[None, 'Batmobile', 'Joker']))
        >>> df
           age       born    name        toy
        0  5.0        NaT  Alfred       None
        1  6.0 1939-05-27  Batman  Batmobile
        2  NaN 1940-04-25              Joker

        >>> df.isna()
             age   born   name    toy
        0  False   True  False   True
        1  False  False  False  False
        2   True  False  False  False

        Show which entries in a Series are NA.

        >>> ser = pd.Series([5, 6, np.NaN])
        >>> ser
        0    5.0
        1    6.0
        2    NaN
        dtype: float64

        >>> ser.isna()
        0    False
        1    False
        2     True
        dtype: bool
        """
        return isna(self).__finalize__(self, method="isna")

    def isnull(self: NDFrameT) -> NDFrameT:
        return isna(self).__finalize__(self, method="isnull")

    def notna(self: NDFrameT) -> NDFrameT:
        """
        Detect existing (non-missing) values.

        Return a boolean same-sized object indicating if the values are not
        NA. Non-missing values get mapped to True. Characters such as empty
        strings ``''`` or :attr:`numpy.inf` are not considered NA values
        (unless you set ``pandas.options.mode.use_inf_as_na = True``).
        NA values, such as None or :attr:`numpy.NaN`, get mapped to False
        values.

        Returns
        -------
        {klass}
            Mask of bool values for each element in {klass} that
            indicates whether an element is not an NA value.

        See Also
        --------
        {klass}.notnull : Alias of notna.
        {klass}.isna : Boolean inverse of notna.
        {klass}.dropna : Omit axes labels with missing values.
        notna : Top-level notna.

        Examples
        --------
        Show which entries in a DataFrame are not NA.

        >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
        ...                        born=[pd.NaT, pd.Timestamp('1939-05-27'),
        ...                              pd.Timestamp('1940-04-25')],
        ...                        name=['Alfred', 'Batman', ''],
        ...                        toy=[None, 'Batmobile', 'Joker']))
        >>> df
           age       born    name        toy
        0  5.0        NaT  Alfred       None
        1  6.0 1939-05-27  Batman  Batmobile
        2  NaN 1940-04-25              Joker

        >>> df.notna()
             age   born  name    toy
        0   True  False  True  False
        1   True   True  True   True
        2  False   True  True   True

        Show which entries in a Series are not NA.

>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
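
        Notes
        -----
        A list-like threshold is first aligned to the object along the given
        ``axis``; missing entries in such a threshold act as "no bound" and
        leave the corresponding values unclipped.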
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
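
        Only the time-of-day component of the index is compared; the calendar
        date of each entry is ignored.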
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
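        Notes
        -----
        The selected window ends at ``self.index[0] + offset``. For tick-like
        offsets (e.g. '3D') an endpoint that is present in the index is
        excluded, while for anchored offsets (e.g. '1M') label-based slicing
        keeps the endpoint.
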
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self: NDFrameT, cond, other=np.nan, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). If not specified, entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. For `Series` this parameter is unused and defaults to 0. level : int, default None Alignment level if needed. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. If the axis of ``other`` does not align with axis of ``cond`` {klass}, the misaligned index positions will be filled with {cond_rev}. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. The dtype of the object takes precedence. The fill value is casted to the object's dtype, if this can be done losslessly. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s = pd.Series(range(5)) >>> t = pd.Series([True, False]) >>> s.where(t, 99) 0 0 1 99 2 99 3 99 4 99 dtype: int64 >>> s.mask(t, 99) 0 99 1 1 2 99 3 99 4 99 dtype: int64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = common.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level) def mask( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def mask( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... 
def mask( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self: NDFrameT, cond, other=lib.no_default, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") cond = common.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, ) def shift( self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None, ) -> NDFrameT: """ Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ... 
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_convert(None) 2018-09-14 23:30:00 1 dtype: int64 """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") def tz_localize( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo or None Time zone to localize. Passing ``None`` will remove the time zone information and preserve local time. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- {klass} Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']), ... ) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Pass None to convert to tz-naive index and preserve local time: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_localize(None) 2018-09-15 01:30:00 1 dtype: int64 Be careful with DST changes. 
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. 
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> DataFrame | Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, **kwargs ) def _accum_func( self, name: str, func, axis: 
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") The provided code snippet includes necessary dependencies for implementing the `_calculate_deltas` function. Write a Python function `def _calculate_deltas( times: np.ndarray | NDFrame, halflife: float | TimedeltaConvertibleTypes | None, ) -> np.ndarray` to solve the following problem: Return the diff of the times divided by the half-life. These values are used in the calculation of the ewm mean. Parameters ---------- times : np.ndarray, Series Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. halflife : float, str, timedelta, optional Half-life specifying the decay Returns ------- np.ndarray Diff of the times divided by the half-life Here is the function: def _calculate_deltas( times: np.ndarray | NDFrame, halflife: float | TimedeltaConvertibleTypes | None, ) -> np.ndarray: """ Return the diff of the times divided by the half-life. These values are used in the calculation of the ewm mean. Parameters ---------- times : np.ndarray, Series Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. halflife : float, str, timedelta, optional Half-life specifying the decay Returns ------- np.ndarray Diff of the times divided by the half-life """ _times = np.asarray(times.view(np.int64), dtype=np.float64) # TODO: generalize to non-nano? _halflife = float(Timedelta(halflife).as_unit("ns")._value) return np.diff(_times) / _halflife
Return the diff of the times divided by the half-life. These values are used in
the calculation of the ewm mean.

Parameters
----------
times : np.ndarray, Series
    Times corresponding to the observations. Must be monotonically increasing
    and ``datetime64[ns]`` dtype.
halflife : float, str, timedelta, optional
    Half-life specifying the decay

Returns
-------
np.ndarray
    Diff of the times divided by the half-life
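A minimal sketch of what this deltas computation produces, using only public pandas/NumPy calls; the timestamps and half-life below are illustrative. With hourly observation times and halflife="1h", every gap equals exactly one half-life, so each delta comes out as 1.0:

import numpy as np
import pandas as pd

times = pd.Series(pd.date_range("2023-01-01", periods=4, freq="h"))
halflife = pd.Timedelta("1h")

times_ns = np.asarray(times).view("i8").astype(np.float64)  # ns since the epoch
halflife_ns = halflife / pd.Timedelta("1ns")                # half-life in ns

deltas = np.diff(times_ns) / halflife_ns
print(deltas)  # [1. 1. 1.]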
173,154
from __future__ import annotations import functools from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import jit_user_function Any = object() TYPE_CHECKING = True class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime] def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module def jit_user_function( func: Callable, nopython: bool, nogil: bool, parallel: bool ) -> Callable: """ JIT the user's function given the configurable arguments. 
Parameters ---------- func : function user defined function nopython : bool nopython parameter for numba.JIT nogil : bool nogil parameter for numba.JIT parallel : bool parallel parameter for numba.JIT Returns ------- function Numba JITed function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") if numba.extending.is_jitted(func): # Don't jit a user passed jitted function numba_func = func else: def numba_func(data, *_args): if getattr(np, func.__name__, False) is func or isinstance( func, types.BuiltinFunctionType ): jf = func else: jf = numba.jit(func, nopython=nopython, nogil=nogil) def impl(data, *_args): return jf(data, *_args) return impl return numba_func The provided code snippet includes necessary dependencies for implementing the `generate_numba_apply_func` function. Write a Python function `def generate_numba_apply_func( func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool, )` to solve the following problem: Generate a numba jitted apply function specified by values from engine_kwargs. 1. jit the user's function 2. Return a rolling apply function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the rolling apply function. Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function Here is the function: def generate_numba_apply_func( func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool, ): """ Generate a numba jitted apply function specified by values from engine_kwargs. 1. jit the user's function 2. Return a rolling apply function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the rolling apply function. Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function """ numba_func = jit_user_function(func, nopython, nogil, parallel) if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def roll_apply( values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, *args: Any, ) -> np.ndarray: result = np.empty(len(begin)) for i in numba.prange(len(result)): start = begin[i] stop = end[i] window = values[start:stop] count_nan = np.sum(np.isnan(window)) if len(window) - count_nan >= minimum_periods: result[i] = numba_func(window, *args) else: result[i] = np.nan return result return roll_apply
Generate a numba jitted apply function specified by values from engine_kwargs.

1. jit the user's function
2. Return a rolling apply function with the jitted function inline

Configurations specified in engine_kwargs apply to both the user's
function _AND_ the rolling apply function.

Parameters
----------
func : function
    function to be applied to each window and will be JITed
nopython : bool
    nopython to be passed into numba.jit
nogil : bool
    nogil to be passed into numba.jit
parallel : bool
    parallel to be passed into numba.jit

Returns
-------
Numba function
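A short usage sketch of the code path this generator serves: requesting engine="numba" on a rolling apply. numba must be installed; the helper name total_range and the sample data are illustrative, not part of the source:

import numpy as np
import pandas as pd

def total_range(window: np.ndarray) -> float:
    # Plain-NumPy reduction; pandas JITs it and inlines it in the rolling kernel.
    return window.max() - window.min()

s = pd.Series(np.arange(10, dtype=np.float64))
out = s.rolling(window=3, min_periods=3).apply(
    total_range,
    raw=True,  # the numba engine works on raw ndarray windows
    engine="numba",
    engine_kwargs={"nopython": True, "nogil": False, "parallel": False},
)
print(out.tail())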
173,155
from __future__ import annotations import functools from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import jit_user_function TYPE_CHECKING = True def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module The provided code snippet includes necessary dependencies for implementing the `generate_numba_ewm_func` function. Write a Python function `def generate_numba_ewm_func( nopython: bool, nogil: bool, parallel: bool, com: float, adjust: bool, ignore_na: bool, deltas: tuple, normalize: bool, )` to solve the following problem: Generate a numba jitted ewm mean or sum function specified by values from engine_kwargs. 
Parameters ---------- nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit com : float adjust : bool ignore_na : bool deltas : tuple normalize : bool Returns ------- Numba function Here is the function: def generate_numba_ewm_func( nopython: bool, nogil: bool, parallel: bool, com: float, adjust: bool, ignore_na: bool, deltas: tuple, normalize: bool, ): """ Generate a numba jitted ewm mean or sum function specified by values from engine_kwargs. Parameters ---------- nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit com : float adjust : bool ignore_na : bool deltas : tuple normalize : bool Returns ------- Numba function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def ewm( values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, ) -> np.ndarray: result = np.empty(len(values)) alpha = 1.0 / (1.0 + com) old_wt_factor = 1.0 - alpha new_wt = 1.0 if adjust else alpha for i in numba.prange(len(begin)): start = begin[i] stop = end[i] window = values[start:stop] sub_result = np.empty(len(window)) weighted = window[0] nobs = int(not np.isnan(weighted)) sub_result[0] = weighted if nobs >= minimum_periods else np.nan old_wt = 1.0 for j in range(1, len(window)): cur = window[j] is_observation = not np.isnan(cur) nobs += is_observation if not np.isnan(weighted): if is_observation or not ignore_na: if normalize: # note that len(deltas) = len(vals) - 1 and deltas[i] # is to be used in conjunction with vals[i+1] old_wt *= old_wt_factor ** deltas[start + j - 1] else: weighted = old_wt_factor * weighted if is_observation: if normalize: # avoid numerical errors on constant series if weighted != cur: weighted = old_wt * weighted + new_wt * cur if normalize: weighted = weighted / (old_wt + new_wt) if adjust: old_wt += new_wt else: old_wt = 1.0 else: weighted += cur elif is_observation: weighted = cur sub_result[j] = weighted if nobs >= minimum_periods else np.nan result[start:stop] = sub_result return result return ewm
Generate a numba jitted ewm mean or sum function specified by
values from engine_kwargs.

Parameters
----------
nopython : bool
    nopython to be passed into numba.jit
nogil : bool
    nogil to be passed into numba.jit
parallel : bool
    parallel to be passed into numba.jit
com : float
adjust : bool
ignore_na : bool
deltas : tuple
normalize : bool

Returns
-------
Numba function
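A brief usage sketch for the non-table EWM path: the same jitted kernel is reached through ewm(...).mean(engine="numba"). numba is required; the sample values are illustrative:

import numpy as np
import pandas as pd

s = pd.Series([0.0, 1.0, 2.0, np.nan, 4.0])

# Should match the default (Cython) result, but computed by the jitted kernel.
result = s.ewm(com=0.5, adjust=True, ignore_na=False).mean(
    engine="numba",
    engine_kwargs={"nopython": True, "nogil": False, "parallel": False},
)
print(result)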
173,156
from __future__ import annotations import functools from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import jit_user_function Any = object() TYPE_CHECKING = True class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module def jit_user_function( func: Callable, nopython: bool, nogil: bool, parallel: bool ) -> Callable: """ JIT the user's function given the configurable arguments. 
Parameters ---------- func : function user defined function nopython : bool nopython parameter for numba.JIT nogil : bool nogil parameter for numba.JIT parallel : bool parallel parameter for numba.JIT Returns ------- function Numba JITed function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") if numba.extending.is_jitted(func): # Don't jit a user passed jitted function numba_func = func else: def numba_func(data, *_args): if getattr(np, func.__name__, False) is func or isinstance( func, types.BuiltinFunctionType ): jf = func else: jf = numba.jit(func, nopython=nopython, nogil=nogil) def impl(data, *_args): return jf(data, *_args) return impl return numba_func The provided code snippet includes necessary dependencies for implementing the `generate_numba_table_func` function. Write a Python function `def generate_numba_table_func( func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool, )` to solve the following problem: Generate a numba jitted function to apply window calculations table-wise. Func will be passed a M window size x N number of columns array, and must return a 1 x N number of columns array. Func is intended to operate row-wise, but the result will be transposed for axis=1. 1. jit the user's function 2. Return a rolling apply function with the jitted function inline Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function Here is the function: def generate_numba_table_func( func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool, ): """ Generate a numba jitted function to apply window calculations table-wise. Func will be passed a M window size x N number of columns array, and must return a 1 x N number of columns array. Func is intended to operate row-wise, but the result will be transposed for axis=1. 1. jit the user's function 2. Return a rolling apply function with the jitted function inline Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function """ numba_func = jit_user_function(func, nopython, nogil, parallel) if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def roll_table( values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, *args: Any, ): result = np.empty((len(begin), values.shape[1])) min_periods_mask = np.empty(result.shape) for i in numba.prange(len(result)): start = begin[i] stop = end[i] window = values[start:stop] count_nan = np.sum(np.isnan(window), axis=0) sub_result = numba_func(window, *args) nan_mask = len(window) - count_nan >= minimum_periods min_periods_mask[i, :] = nan_mask result[i, :] = sub_result result = np.where(min_periods_mask, result, np.nan) return result return roll_table
Generate a numba jitted function to apply window calculations table-wise.

Func will be passed a M window size x N number of columns array, and
must return a 1 x N number of columns array. Func is intended to operate
row-wise, but the result will be transposed for axis=1.

1. jit the user's function
2. Return a rolling apply function with the jitted function inline

Parameters
----------
func : function
    function to be applied to each window and will be JITed
nopython : bool
    nopython to be passed into numba.jit
nogil : bool
    nogil to be passed into numba.jit
parallel : bool
    parallel to be passed into numba.jit

Returns
-------
Numba function
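A usage sketch for the table-wise path: method="table" on rolling combined with engine="numba" hands the whole window (rows x columns) to the user function, which must return one value per column. The helper col_range and the random data are illustrative; numba is required:

import numpy as np
import pandas as pd

def col_range(table: np.ndarray) -> np.ndarray:
    # table has shape (window, n_columns); return one value per column.
    out = np.empty(table.shape[1])
    for j in range(table.shape[1]):
        out[j] = table[:, j].max() - table[:, j].min()
    return out

df = pd.DataFrame(
    np.random.default_rng(0).standard_normal((8, 3)), columns=list("abc")
)
res = df.rolling(window=4, method="table", min_periods=4).apply(
    col_range, raw=True, engine="numba"
)
print(res)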
173,157
from __future__ import annotations import functools from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import jit_user_function TYPE_CHECKING = True def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module def generate_manual_numpy_nan_agg_with_axis(nan_func): if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=True, nogil=True, parallel=True) def nan_agg_with_axis(table): result = np.empty(table.shape[1]) for i in numba.prange(table.shape[1]): partition = table[:, i] result[i] = nan_func(partition) return result return nan_agg_with_axis
null
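The helper above wraps a 1-D nan-aware NumPy reduction so it can run column-by-column inside numba, which (to the best of my knowledge) does not accept the axis keyword for these reductions. A plain-NumPy illustration of the equivalence, with illustrative data:

import numpy as np

table = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]])

manual = np.array([np.nanmean(table[:, j]) for j in range(table.shape[1])])
print(manual)                     # [3. 5.]
print(np.nanmean(table, axis=0))  # same result, done here with the axis kwarg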
173,158
from __future__ import annotations import functools from typing import ( TYPE_CHECKING, Any, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import jit_user_function TYPE_CHECKING = True def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module The provided code snippet includes necessary dependencies for implementing the `generate_numba_ewm_table_func` function. Write a Python function `def generate_numba_ewm_table_func( nopython: bool, nogil: bool, parallel: bool, com: float, adjust: bool, ignore_na: bool, deltas: tuple, normalize: bool, )` to solve the following problem: Generate a numba jitted ewm mean or sum function applied table wise specified by values from engine_kwargs. 
Parameters ---------- nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit com : float adjust : bool ignore_na : bool deltas : tuple normalize: bool Returns ------- Numba function Here is the function: def generate_numba_ewm_table_func( nopython: bool, nogil: bool, parallel: bool, com: float, adjust: bool, ignore_na: bool, deltas: tuple, normalize: bool, ): """ Generate a numba jitted ewm mean or sum function applied table wise specified by values from engine_kwargs. Parameters ---------- nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit com : float adjust : bool ignore_na : bool deltas : tuple normalize: bool Returns ------- Numba function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def ewm_table( values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, ) -> np.ndarray: alpha = 1.0 / (1.0 + com) old_wt_factor = 1.0 - alpha new_wt = 1.0 if adjust else alpha old_wt = np.ones(values.shape[1]) result = np.empty(values.shape) weighted = values[0].copy() nobs = (~np.isnan(weighted)).astype(np.int64) result[0] = np.where(nobs >= minimum_periods, weighted, np.nan) for i in range(1, len(values)): cur = values[i] is_observations = ~np.isnan(cur) nobs += is_observations.astype(np.int64) for j in numba.prange(len(cur)): if not np.isnan(weighted[j]): if is_observations[j] or not ignore_na: if normalize: # note that len(deltas) = len(vals) - 1 and deltas[i] # is to be used in conjunction with vals[i+1] old_wt[j] *= old_wt_factor ** deltas[i - 1] else: weighted[j] = old_wt_factor * weighted[j] if is_observations[j]: if normalize: # avoid numerical errors on constant series if weighted[j] != cur[j]: weighted[j] = ( old_wt[j] * weighted[j] + new_wt * cur[j] ) if normalize: weighted[j] = weighted[j] / (old_wt[j] + new_wt) if adjust: old_wt[j] += new_wt else: old_wt[j] = 1.0 else: weighted[j] += cur[j] elif is_observations[j]: weighted[j] = cur[j] result[i] = np.where(nobs >= minimum_periods, weighted, np.nan) return result return ewm_table
Generate a numba jitted ewm mean or sum function applied table wise specified
by values from engine_kwargs.

Parameters
----------
nopython : bool
    nopython to be passed into numba.jit
nogil : bool
    nogil to be passed into numba.jit
parallel : bool
    parallel to be passed into numba.jit
com : float
adjust : bool
ignore_na : bool
deltas : tuple
normalize: bool

Returns
-------
Numba function
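A usage sketch for the table-wise EWM kernel: method="table" on ewm plus engine="numba" on the reduction. numba is required; the values are illustrative:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, np.nan, 4.0], "b": [10.0, 20.0, 30.0, 40.0]})
res = df.ewm(com=1.0, method="table").mean(
    engine="numba",
    engine_kwargs={"nopython": True, "nogil": False, "parallel": False},
)
print(res)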
173,159
from __future__ import annotations from collections import defaultdict from typing import cast import numpy as np from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.indexes.api import MultiIndex def prep_binary(arg1, arg2): # mask out values, this also makes a common index... X = arg1 + 0 * arg2 Y = arg2 + 0 * arg1 return X, Y class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] def __init__(self, **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... def __init__( self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... def __missing__(self, key: _KT) -> _VT: ... def copy(self: _S) -> _S: ... def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) def flex_binary_moment(arg1, arg2, f, pairwise: bool = False): if isinstance(arg1, ABCSeries) and isinstance(arg2, ABCSeries): X, Y = prep_binary(arg1, arg2) return f(X, Y) elif isinstance(arg1, ABCDataFrame): from pandas import DataFrame def dataframe_from_int_dict(data, frame_template) -> DataFrame: result = DataFrame(data, index=frame_template.index) if len(result.columns) > 0: result.columns = frame_template.columns[result.columns] else: result.columns = frame_template.columns.copy() return result results = {} if isinstance(arg2, ABCDataFrame): if pairwise is False: if arg1 is arg2: # special case in order to handle duplicate column names for i in range(len(arg1.columns)): results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) return dataframe_from_int_dict(results, arg1) else: if not arg1.columns.is_unique: raise ValueError("'arg1' columns are not unique") if not arg2.columns.is_unique: raise ValueError("'arg2' columns are not unique") X, Y = arg1.align(arg2, join="outer") X, Y = prep_binary(X, Y) res_columns = arg1.columns.union(arg2.columns) for col in res_columns: if col in X and col in Y: results[col] = f(X[col], Y[col]) return DataFrame(results, index=X.index, columns=res_columns) elif pairwise is True: results = defaultdict(dict) for i in range(len(arg1.columns)): for j in range(len(arg2.columns)): if j < i and arg2 is arg1: # Symmetric case results[i][j] = results[j][i] else: results[i][j] = f( *prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) ) from pandas import concat result_index = arg1.index.union(arg2.index) if len(result_index): # construct result frame result = concat( [ concat( [results[i][j] for j in range(len(arg2.columns))], ignore_index=True, ) for i in range(len(arg1.columns)) ], ignore_index=True, axis=1, ) result.columns = arg1.columns # set the index and reorder if arg2.columns.nlevels > 1: # mypy needs to know columns is a MultiIndex, Index doesn't # have levels attribute arg2.columns = cast(MultiIndex, arg2.columns) # GH 21157: Equivalent to 
MultiIndex.from_product( # [result_index], <unique combinations of arg2.columns.levels>, # ) # A normal MultiIndex.from_product will produce too many # combinations. result_level = np.tile( result_index, len(result) // len(result_index) ) arg2_levels = ( np.repeat( arg2.columns.get_level_values(i), len(result) // len(arg2.columns), ) for i in range(arg2.columns.nlevels) ) result_names = list(arg2.columns.names) + [result_index.name] result.index = MultiIndex.from_arrays( [*arg2_levels, result_level], names=result_names ) # GH 34440 num_levels = len(result.index.levels) new_order = [num_levels - 1] + list(range(num_levels - 1)) result = result.reorder_levels(new_order).sort_index() else: result.index = MultiIndex.from_product( [range(len(arg2.columns)), range(len(result_index))] ) result = result.swaplevel(1, 0).sort_index() result.index = MultiIndex.from_product( [result_index] + [arg2.columns] ) else: # empty result result = DataFrame( index=MultiIndex( levels=[arg1.index, arg2.columns], codes=[[], []] ), columns=arg2.columns, dtype="float64", ) # reset our index names to arg1 names # reset our column names to arg2 names # careful not to mutate the original names result.columns = result.columns.set_names(arg1.columns.names) result.index = result.index.set_names( result_index.names + arg2.columns.names ) return result else: results = { i: f(*prep_binary(arg1.iloc[:, i], arg2)) for i in range(len(arg1.columns)) } return dataframe_from_int_dict(results, arg1) else: return flex_binary_moment(arg2, arg1, f)
null
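flex_binary_moment is the machinery behind pairwise rolling covariance and correlation; a short sketch of the user-facing result it assembles, with illustrative random data. pairwise=True stacks each column-pair statistic under a MultiIndex of (row label, column):

import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.random.default_rng(1).standard_normal((6, 2)), columns=["x", "y"]
)
pairwise_corr = df.rolling(window=3).corr(pairwise=True)
print(pairwise_corr.tail(4))  # rows labelled (row label, column)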
173,160
from __future__ import annotations from collections import defaultdict from typing import cast import numpy as np from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.indexes.api import MultiIndex ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) def zsqrt(x): with np.errstate(all="ignore"): result = np.sqrt(x) mask = x < 0 if isinstance(x, ABCDataFrame): if mask._values.any(): result[mask] = 0 else: if mask.any(): result[mask] = 0 return result
null
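zsqrt guards against tiny negative variances produced by floating-point cancellation in rolling/ewm std computations. The illustrative snippet below reproduces its effect with public calls only:

import numpy as np
import pandas as pd

s = pd.Series([4.0, -1e-17, 0.0, 9.0])  # -1e-17 stands in for rounding error

with np.errstate(all="ignore"):
    result = np.sqrt(s)   # sqrt of the negative entry would be NaN
result[s < 0] = 0         # ... so it is clamped to 0.0 instead
print(result.tolist())    # [2.0, 0.0, 0.0, 3.0]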
173,161
from __future__ import annotations from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, Any, Callable, Collection, Hashable, Iterable, List, Literal, Sequence, Tuple, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( algos as libalgos, index as libindex, lib, ) from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, Scalar, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( InvalidIndexError, PerformanceWarning, UnsortedIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_categorical_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import ( array_equivalent, isna, ) import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, get_unanimous_names, ) from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, lexsort_indexer, ) from pandas.io.formats.printing import pprint_thing def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ... def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... F = TypeVar("F", bound=FuncType) The provided code snippet includes necessary dependencies for implementing the `names_compat` function. Write a Python function `def names_compat(meth: F) -> F` to solve the following problem: A decorator to allow either `name` or `names` keyword but not both. This makes it easier to share code with base class. Here is the function: def names_compat(meth: F) -> F: """ A decorator to allow either `name` or `names` keyword but not both. This makes it easier to share code with base class. """ @wraps(meth) def new_meth(self_or_cls, *args, **kwargs): if "name" in kwargs and "names" in kwargs: raise TypeError("Can only provide one of `names` and `name`") if "name" in kwargs: kwargs["names"] = kwargs.pop("name") return meth(self_or_cls, *args, **kwargs) return cast(F, new_meth)
A decorator to allow either `name` or `names` keyword but not both. This makes it easier to share code with base class.
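A short sketch of the calling pattern this decorator enables; MultiIndex.from_tuples is one constructor wrapped with it, so name is accepted as an alias for names, while passing both at once raises TypeError. The example data is illustrative:

import pandas as pd

tuples = [("a", 1), ("a", 2), ("b", 1)]
mi = pd.MultiIndex.from_tuples(tuples, name=["letter", "number"])
print(mi.names)  # FrozenList(['letter', 'number'])

# pd.MultiIndex.from_tuples(tuples, name="x", names="y")  # would raise TypeError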
173,162
from __future__ import annotations from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, Any, Callable, Collection, Hashable, Iterable, List, Literal, Sequence, Tuple, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( algos as libalgos, index as libindex, lib, ) from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, Scalar, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( InvalidIndexError, PerformanceWarning, UnsortedIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_categorical_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import ( array_equivalent, isna, ) import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, get_unanimous_names, ) from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, lexsort_indexer, ) from pandas.io.formats.printing import pprint_thing ensure_int64 = algos.ensure_int64 The provided code snippet includes necessary dependencies for implementing the `_lexsort_depth` function. Write a Python function `def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int` to solve the following problem: Count depth (up to a maximum of `nlevels`) with which codes are lexsorted. Here is the function: def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int: """Count depth (up to a maximum of `nlevels`) with which codes are lexsorted.""" int64_codes = [ensure_int64(level_codes) for level_codes in codes] for k in range(nlevels, 0, -1): if libalgos.is_lexsorted(int64_codes[:k]): return k return 0
Count depth (up to a maximum of `nlevels`) with which codes are lexsorted.
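A small worked example of the depth computation, mirroring the function body with illustrative codes: level 0 is sorted on its own, but the two levels together are not lexsorted, so the depth is 1:

import numpy as np
from pandas._libs import algos as libalgos

codes = [
    np.array([0, 0, 1, 1], dtype=np.int64),  # sorted on its own
    np.array([0, 1, 1, 0], dtype=np.int64),  # breaks at (1, 1) -> (1, 0)
]
for k in range(len(codes), 0, -1):
    if libalgos.is_lexsorted(codes[:k]):
        print(k)  # 1
        break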
173,163
from __future__ import annotations from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, Any, Callable, Collection, Hashable, Iterable, List, Literal, Sequence, Tuple, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( algos as libalgos, index as libindex, lib, ) from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, Scalar, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( InvalidIndexError, PerformanceWarning, UnsortedIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_categorical_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import ( array_equivalent, isna, ) import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, get_unanimous_names, ) from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, lexsort_indexer, ) from pandas.io.formats.printing import pprint_thing def sparsify_labels(label_list, start: int = 0, sentinel: object = ""): pivoted = list(zip(*label_list)) k = len(label_list) result = pivoted[: start + 1] prev = pivoted[start] for cur in pivoted[start + 1 :]: sparse_cur = [] for i, (p, t) in enumerate(zip(prev, cur)): if i == k - 1: sparse_cur.append(t) result.append(sparse_cur) break if p == t: sparse_cur.append(sentinel) else: sparse_cur.extend(cur[i:]) result.append(sparse_cur) break prev = cur return list(zip(*result))
null
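sparsify_labels drives the blanked-out repeated prefix labels in MultiIndex display; the effect is easiest to see through the display.multi_sparse option. The example data is illustrative:

import pandas as pd

mi = pd.MultiIndex.from_tuples(
    [("bar", "one"), ("bar", "two"), ("baz", "one")], names=["first", "second"]
)
df = pd.DataFrame({"v": [1, 2, 3]}, index=mi)

print(df)  # the repeated "bar" in the first level is blanked (sparsified)
with pd.option_context("display.multi_sparse", False):
    print(df)  # every label shown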
173,164
from __future__ import annotations from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, Any, Callable, Collection, Hashable, Iterable, List, Literal, Sequence, Tuple, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( algos as libalgos, index as libindex, lib, ) from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, Scalar, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( InvalidIndexError, PerformanceWarning, UnsortedIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_categorical_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import ( array_equivalent, isna, ) import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, get_unanimous_names, ) from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, lexsort_indexer, ) from pandas.io.formats.printing import pprint_thing def is_extension_array_dtype(arr_or_dtype) -> bool: def _get_na_rep(dtype) -> str: if is_extension_array_dtype(dtype): return f"{dtype.na_value}" else: dtype = dtype.type return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
null
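For the plain NumPy-dtype branch, the NA token is a fixed mapping; a tiny illustration (extension dtypes instead render their own na_value):

import numpy as np

for dtype in (np.dtype("M8[ns]"), np.dtype("m8[ns]"), np.dtype("f8"), np.dtype("O")):
    rep = {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype.type, "NaN")
    print(f"{str(dtype):>16} -> {rep}")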
173,165
from __future__ import annotations from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, Any, Callable, Collection, Hashable, Iterable, List, Literal, Sequence, Tuple, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( algos as libalgos, index as libindex, lib, ) from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, Scalar, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( InvalidIndexError, PerformanceWarning, UnsortedIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_categorical_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import ( array_equivalent, isna, ) import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, get_unanimous_names, ) from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, lexsort_indexer, ) from pandas.io.formats.printing import pprint_thing class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. 
Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. 
_no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. 
data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. # See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. 
""" result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. """ result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. 
""" self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. 
""" return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. 
""" def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. 
""" name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). """ attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. 
""" header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name is reused. 
>>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead." ) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. 
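
        Returns
        -------
        list of Hashable
            The validated names, or the defaults built from ``self.names``
            and ``default`` when ``names`` is None.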
Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. 
>>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. 
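
        For a flat Index, only level 0 (or -1, or a level matching the
        index's name) passes; anything else raises, e.g.:

        >>> pd.Index([1, 2])._validate_index_level(1)
        Traceback (most recent call last):
        IndexError: Too many levels: Index has only 1 level, not 2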
""" if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or" "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name. """ if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." 
) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. 
        Examples
        --------
        >>> idx = pd.Index([1, 5, 7, 7])
        >>> idx.is_unique
        False

        >>> idx = pd.Index([1, 5, 7])
        >>> idx.is_unique
        True

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.is_unique
        False

        >>> idx = pd.Index(["Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.is_unique
        True
        """
        return self._engine.is_unique

    def has_duplicates(self) -> bool:
        """
        Check if the Index has duplicate values.

        Returns
        -------
        bool
            Whether or not the Index has duplicate values.

        See Also
        --------
        Index.is_unique : Inverse method that checks if it has unique values.

        Examples
        --------
        >>> idx = pd.Index([1, 5, 7, 7])
        >>> idx.has_duplicates
        True

        >>> idx = pd.Index([1, 5, 7])
        >>> idx.has_duplicates
        False

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.has_duplicates
        True

        >>> idx = pd.Index(["Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.has_duplicates
        False
        """
        return not self.is_unique

    def is_boolean(self) -> bool:
        """
        Check if the Index only consists of booleans.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_bool_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of booleans.

        See Also
        --------
        is_integer : Check if the Index only consists of integers (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([True, False, True])
        >>> idx.is_boolean()  # doctest: +SKIP
        True

        >>> idx = pd.Index(["True", "False", "True"])
        >>> idx.is_boolean()  # doctest: +SKIP
        False

        >>> idx = pd.Index([True, False, "True"])
        >>> idx.is_boolean()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_boolean is deprecated. "
            "Use pandas.api.types.is_bool_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["boolean"]

    def is_integer(self) -> bool:
        """
        Check if the Index only consists of integers.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_integer_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of integers.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx.is_integer()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
        >>> idx.is_integer()  # doctest: +SKIP
        False

        >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
        >>> idx.is_integer()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_integer is deprecated. "
            "Use pandas.api.types.is_integer_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["integer"]

    def is_floating(self) -> bool:
        """
        Check if the Index is a floating type.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_float_dtype` instead.

        The Index may consist of only floats, NaNs, or a mix of floats,
        integers, or NaNs.

        Returns
        -------
        bool
            Whether or not the Index only consists of floats, NaNs, or
            a mix of floats, integers, or NaNs.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_integer : Check if the Index only consists of integers (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
        >>> idx.is_floating()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
        >>> idx.is_floating()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4, np.nan])
        >>> idx.is_floating()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx.is_floating()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_floating is deprecated. "
            "Use pandas.api.types.is_float_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]

    def is_numeric(self) -> bool:
        """
        Check if the Index only consists of numeric data.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_numeric_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of numeric data.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_integer : Check if the Index only consists of integers (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4.0])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
        >>> idx.is_numeric()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_numeric is deprecated. "
            "Use pandas.api.types.is_any_real_numeric_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["integer", "floating"]

    def is_object(self) -> bool:
        """
        Check if the Index is of the object dtype.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_object_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index is of the object dtype.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_integer : Check if the Index only consists of integers (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
        >>> idx.is_object()  # doctest: +SKIP
        True

        >>> idx = pd.Index(["Apple", "Mango", 2.0])
        >>> idx.is_object()  # doctest: +SKIP
        True

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
"Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. " "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. 
""" if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. 
Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-likes. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance, this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate an pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed. 
The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. 
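
        A sketch of the behavior (variable names are illustrative):

        >>> left = pd.date_range('2020-01-01', periods=1, tz='US/Eastern')
        >>> right = pd.date_range('2020-01-01', periods=1, tz='Asia/Tokyo')
        >>> left2, right2 = left._dti_setop_align_tzs(right, 'union')
        >>> str(left2.tz), str(right2.tz)
        ('UTC', 'UTC')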
""" # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. 
Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). 
Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
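Examples
--------
An illustrative sketch via the public ``intersection`` (output shown for a
default integer Index); with ``sort=False`` the result keeps the order in
which the common elements appear in ``self``:

>>> pd.Index([3, 1, 2]).intersection(pd.Index([1, 2, 9]), sort=False)
Index([1, 2], dtype='int64')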
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
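Examples
--------
An illustrative sketch of one of the checks below; passing ``tolerance``
without a fill ``method`` is rejected:

>>> pd.Index([1, 2, 3]).get_indexer([1], tolerance=1)
Traceback (most recent call last):
...
ValueError: tolerance argument only valid if doing pad, backfill or nearest reindexing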
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
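Notes
-----
When ``other`` is omitted, entries where the condition is False are filled
with an NA marker appropriate for the dtype. A minimal sketch (shown here
for a float Index; output is illustrative):

>>> idx = pd.Index([1.0, 2.0, 3.0])
>>> idx.where(idx > 1.5)
Index([nan, 2.0, 3.0], dtype='float64')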
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- np.ndarray[np.intp] Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ # This works for either ndarray or EA, is overridden # by RangeIndex, MultIIndex return self._data.argsort(*args, **kwargs) def _check_indexing_error(self, key): if not is_scalar(key): # if key is not a scalar, directly raise an error (the code below # would convert to numpy arrays and raise later any way) - GH29926 raise InvalidIndexError(key) def _should_fallback_to_positional(self) -> bool: """ Should an integer key be treated as positional? """ return self.inferred_type not in { "integer", "mixed-integer", "floating", "complex", } _index_shared_docs[ "get_indexer_non_unique" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s Returns ------- indexer : np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. missing : np.ndarray[np.intp] An indexer into the target of the values not found. These correspond to the -1 in the indexer array. Examples -------- >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['b', 'b']) (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64)) In the example below there are no matched values. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['q', 'r', 't']) (array([-1, -1, -1]), array([0, 1, 2])) For this reason, the returned ``indexer`` contains only integers equal to -1. It demonstrates that there's no match between the index and the ``target`` values at these positions. The mask [0, 1, 2] in the return value shows that the first, second, and third elements are missing. Notice that the return value is a tuple contains two items. In the example below the first item is an array of locations in ``index``. The second item is a mask shows that the first and third elements are missing. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['f', 'b', 's']) (array([-1, 1, 3, 4, -1]), array([0, 2])) """ def get_indexer_non_unique( self, target ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) target = self._maybe_cast_listlike_indexer(target) if not self._should_compare(target) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. return self._get_indexer_non_comparable(target, method=None, unique=False) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer_non_unique(ptarget) if not is_dtype_equal(self.dtype, target.dtype): # TODO: if object, could use infer_dtype to preempt costly # conversion if still non-comparable? 
dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) that = target.astype(dtype, copy=False) return this.get_indexer_non_unique(that) # TODO: get_indexer has fastpaths for both Categorical-self and # Categorical-target. Can we do something similar here? # Note: _maybe_promote ensures we never get here with MultiIndex # self and non-Multi target tgt_values = target._get_engine_target() if self._is_multi and target._is_multi: engine = self._engine # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has # no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr] indexer, missing = self._engine.get_indexer_non_unique(tgt_values) return ensure_platform_int(indexer), ensure_platform_int(missing) def get_indexer_for(self, target) -> npt.NDArray[np.intp]: """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. Returns ------- np.ndarray[np.intp] List of indices. Examples -------- >>> idx = pd.Index([np.nan, 'var1', np.nan]) >>> idx.get_indexer_for([np.nan]) array([0, 2]) """ if self._index_as_unique: return self.get_indexer(target) indexer, _ = self.get_indexer_non_unique(target) return indexer def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: """ Analogue to get_indexer that raises if any elements are missing. """ keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if self._index_as_unique: indexer = self.get_indexer_for(keyarr) keyarr = self.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr) self._raise_if_missing(keyarr, indexer, axis_name) keyarr = self.take(indexer) if isinstance(key, Index): # GH 42790 - Preserve name from an Index keyarr.name = key.name if keyarr.dtype.kind in ["m", "M"]: # DTI/TDI.take can infer a freq in some cases when we dont want one if isinstance(key, list) or ( isinstance(key, type(self)) # "Index" has no attribute "freq" and key.freq is None # type: ignore[attr-defined] ): keyarr = keyarr._with_freq(None) return keyarr, indexer def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: # TODO: remove special-case; this is just to keep exception # message tests from raising while debugging use_interval_msg = is_interval_dtype(self.dtype) or ( is_categorical_dtype(self.dtype) # "Index" has no attribute "categories" [attr-defined] and is_interval_dtype( self.categories.dtype # type: ignore[attr-defined] ) ) if nmissing == len(indexer): if use_interval_msg: key = list(key) raise KeyError(f"None of [{key}] are in the [{axis_name}]") not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f"{not_found} not in index") def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[True] = ... ) -> npt.NDArray[np.intp]: ... 
def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None. """ if method is not None: other = _unpack_nested_dtype(target) raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: # This is for get_indexer return no_matches else: # This is for get_indexer_non_unique missing = np.arange(len(target), dtype=np.intp) return no_matches, missing def _index_as_unique(self) -> bool: """ Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat. """ return self.is_unique _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" def _maybe_promote(self, other: Index) -> tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. """ if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if ( self.tz is not None and other.tz is not None and not tz_compare(self.tz, other.tz) ): # standardize on UTC return self.tz_convert("UTC"), other.tz_convert("UTC") elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): try: return type(other)(self), other except OutOfBoundsDatetime: return self, other elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): # TODO: we dont have tests that get here return type(other)(self), other elif self.dtype.kind == "u" and other.dtype.kind == "i": # GH#41873 if other.min() >= 0: # lookup min as it may be cached # TODO: may need itemsize check if we have non-64-bit Indexes return self, other.astype(self.dtype) elif self._is_multi and not other._is_multi: try: # "Type[Index]" has no attribute "from_tuples" other = type(self).from_tuples(other) # type: ignore[attr-defined] except (TypeError, ValueError): # let's instead try with a straight Index self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): # Reverse op so we dont need to re-implement on the subclasses other, self = other._maybe_promote(self) return self, other def _find_common_type_compat(self, target) -> DtypeObj: """ Implementation of find_common_type that adjusts for Index-specific special cases. 
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. 
step : int, default None Returns ------- slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples -------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3, None) """ start_slice, end_slice = self.slice_locs(start, end, step=step) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step) def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ return key def _maybe_cast_listlike_indexer(self, target) -> Index: """ Analogue to maybe_cast_indexer for get_indexer instead of get_loc. """ return ensure_index(target) def _validate_indexer(self, form: str_t, key, kind: str_t) -> None: """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ assert kind in ["getitem", "iloc"] if key is not None and not is_integer(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ # We are a plain index here (sub-class override this method if they # wish to have special treatment for floats/ints, e.g. datetimelike Indexes if is_numeric_dtype(self.dtype): return self._maybe_cast_indexer(label) # reject them, if index does not contain label if (is_float(label) or is_integer(label)) and label not in self: self._raise_invalid_indexer("slice", label) return label def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: # np.searchsorted expects ascending sort order, have to reverse # everything for it to work (element ordering, search side and # resulting value). pos = self[::-1].searchsorted( label, side="right" if side == "left" else "left" ) return len(self) - pos raise ValueError("index must be monotonic increasing or decreasing") def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} Returns ------- int Index of label. """ if side not in ("left", "right"): raise ValueError( "Invalid value for side kwarg, must be either " f"'left' or 'right': {side}" ) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. 
label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " f"label: {repr(original_label)}" ) if isinstance(slc, slice): if side == "left": return slc.start else: return slc.stop else: if side == "right": return slc + 1 else: return slc def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, defaults None If None, defaults to 1. Returns ------- tuple[int, int] See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples -------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = step is None or step >= 0 if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice def delete(self: _IndexT, loc) -> _IndexT: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) The provided code snippet includes necessary dependencies for implementing the `maybe_droplevels` function. Write a Python function `def maybe_droplevels(index: Index, key) -> Index` to solve the following problem: Attempt to drop level or levels from the given index. Parameters ---------- index: Index key : scalar or tuple Returns ------- Index Here is the function: def maybe_droplevels(index: Index, key) -> Index: """ Attempt to drop level or levels from the given index. Parameters ---------- index: Index key : scalar or tuple Returns ------- Index """ # drop levels original_index = index if isinstance(key, tuple): # Caller is responsible for ensuring the key is not an entry in the first # level of the MultiIndex. for _ in key: try: index = index._drop_level_numbers([0]) except ValueError: # we have dropped too much, so back out return original_index else: try: index = index._drop_level_numbers([0]) except ValueError: pass return index
Attempt to drop level or levels from the given index. Parameters ---------- index: Index key : scalar or tuple Returns ------- Index
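A minimal usage sketch for the `maybe_droplevels` implementation shown in the row above. The sample MultiIndex and the commented outputs are illustrative only, and the import location is an assumption (in recent pandas versions the helper lives in pandas.core.indexes.multi), not something stated in this row.

import pandas as pd
from pandas.core.indexes.multi import maybe_droplevels  # assumed import location

mi = pd.MultiIndex.from_tuples(
    [("a", 1, "x"), ("a", 2, "y"), ("b", 1, "z")],
    names=["outer", "inner", "leaf"],
)

# A tuple key drops one leading level per element: a 2-tuple leaves only 'leaf'.
print(maybe_droplevels(mi, ("a", 1)))  # Index(['x', 'y', 'z'], dtype='object', name='leaf')

# A scalar key drops only the first level, leaving a MultiIndex over ('inner', 'leaf').
print(maybe_droplevels(mi, "a"))

# Asking to drop more levels than exist backs out and returns the original index unchanged.
print(maybe_droplevels(mi, ("a", 1, "x", "extra")).equals(mi))  # True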
173,166
from __future__ import annotations from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, Any, Callable, Collection, Hashable, Iterable, List, Literal, Sequence, Tuple, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( algos as libalgos, index as libindex, lib, ) from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, Scalar, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( InvalidIndexError, PerformanceWarning, UnsortedIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_categorical_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import ( array_equivalent, isna, ) import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, get_unanimous_names, ) from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, lexsort_indexer, ) from pandas.io.formats.printing import pprint_thing def coerce_indexer_dtype(indexer, categories) -> np.ndarray: """coerce the indexer input array to the smallest dtype possible""" length = len(categories) if length < _int8_max: return ensure_int8(indexer) elif length < _int16_max: return ensure_int16(indexer) elif length < _int32_max: return ensure_int32(indexer) return ensure_int64(indexer) The provided code snippet includes necessary dependencies for implementing the `_coerce_indexer_frozen` function. Write a Python function `def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray` to solve the following problem: Coerce the array-like indexer to the smallest integer dtype that can encode all of the given categories. Parameters ---------- array_like : array-like categories : array-like copy : bool Returns ------- np.ndarray Non-writeable. Here is the function: def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray: """ Coerce the array-like indexer to the smallest integer dtype that can encode all of the given categories. Parameters ---------- array_like : array-like categories : array-like copy : bool Returns ------- np.ndarray Non-writeable. """ array_like = coerce_indexer_dtype(array_like, categories) if copy: array_like = array_like.copy() array_like.flags.writeable = False return array_like
Coerce the array-like indexer to the smallest integer dtype that can encode all of the given categories. Parameters ---------- array_like : array-like categories : array-like copy : bool Returns ------- np.ndarray Non-writeable.
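A small behavioural sketch for `_coerce_indexer_frozen` as implemented in the row above. The arrays are made up for illustration, and the commented values are what the shown code implies rather than captured output.

import numpy as np

categories = np.array(["a", "b", "c"])          # well under 127 categories
codes = np.array([0, 2, 1, 0], dtype=np.int64)  # indexer into `categories`

frozen = _coerce_indexer_frozen(codes, categories, copy=True)
print(frozen.dtype)            # int8 -- smallest integer dtype that can encode 3 categories
print(frozen.flags.writeable)  # False -- the returned array is locked against mutation
codes[0] = 2                   # `frozen` is a separate buffer, so later edits to `codes` do not leak in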
173,167
from __future__ import annotations from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, Any, Callable, Collection, Hashable, Iterable, List, Literal, Sequence, Tuple, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( algos as libalgos, index as libindex, lib, ) from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, Scalar, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( InvalidIndexError, PerformanceWarning, UnsortedIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_categorical_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import ( array_equivalent, isna, ) import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, get_unanimous_names, ) from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, lexsort_indexer, ) from pandas.io.formats.printing import pprint_thing The provided code snippet includes necessary dependencies for implementing the `_require_listlike` function. Write a Python function `def _require_listlike(level, arr, arrname: str)` to solve the following problem: Ensure that level is either None or listlike, and arr is list-of-listlike. Here is the function: def _require_listlike(level, arr, arrname: str): """ Ensure that level is either None or listlike, and arr is list-of-listlike. """ if level is not None and not is_list_like(level): if not is_list_like(arr): raise TypeError(f"{arrname} must be list-like") if len(arr) > 0 and is_list_like(arr[0]): raise TypeError(f"{arrname} must be list-like") level = [level] arr = [arr] elif level is None or is_list_like(level): if not is_list_like(arr) or not is_list_like(arr[0]): raise TypeError(f"{arrname} must be list of lists-like") return level, arr
Ensure that level is either None or listlike, and arr is list-of-listlike.
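A quick sketch of the validation paths in the `_require_listlike` helper above; the inputs, the "Levels" argument name, and the commented results are illustrative, following the branches of the shown implementation.

# Scalar level + flat list-like: both get wrapped so callers can iterate uniformly.
level, arr = _require_listlike(0, ["a", "b"], "Levels")
print(level, arr)   # [0] [['a', 'b']]

# level=None (or a list of levels) requires a list of list-likes and is passed through.
level, arr = _require_listlike(None, [["a", "b"], [1, 2]], "Levels")
print(level, arr)   # None [['a', 'b'], [1, 2]]

# A flat list with level=None fails the shape check:
# _require_listlike(None, ["a", "b"], "Levels")  -> TypeError: Levels must be list of lists-like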
173,168
from __future__ import annotations from operator import ( le, lt, ) import textwrap from typing import ( Any, Hashable, Literal, ) import numpy as np from pandas._libs import lib from pandas._libs.interval import ( Interval, IntervalMixin, IntervalTree, ) from pandas._libs.tslibs import ( BaseOffset, Timedelta, Timestamp, to_offset, ) from pandas._typing import ( Dtype, DtypeObj, IntervalClosedType, npt, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import ( Appender, cache_readonly, ) from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import ( find_common_type, infer_dtype_from_scalar, maybe_box_datetimelike, maybe_downcast_numeric, maybe_upcast_numeric_to_64bit, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float, is_float_dtype, is_integer, is_integer_dtype, is_interval_dtype, is_list_like, is_number, is_object_dtype, is_scalar, ) from pandas.core.dtypes.dtypes import IntervalDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.algorithms import unique from pandas.core.arrays.interval import ( IntervalArray, _interval_shared_docs, ) import pandas.core.common as com from pandas.core.indexers import is_valid_positional_slice import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, maybe_extract_name, ) from pandas.core.indexes.datetimes import ( DatetimeIndex, date_range, ) from pandas.core.indexes.extension import ( ExtensionIndex, inherit_names, ) from pandas.core.indexes.multi import MultiIndex from pandas.core.indexes.timedeltas import ( TimedeltaIndex, timedelta_range, ) def is_datetime64tz_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of a DatetimeTZDtype dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. Examples -------- >>> is_datetime64tz_dtype(object) False >>> is_datetime64tz_dtype([1, 2, 3]) False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_datetime64tz_dtype(dtype) True >>> is_datetime64tz_dtype(s) True """ if isinstance(arr_or_dtype, DatetimeTZDtype): # GH#33400 fastpath for dtype object # GH 34986 return True if arr_or_dtype is None: return False return DatetimeTZDtype.is_dtype(arr_or_dtype) def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. 
Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a timedelta64 or datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a timedelta64, or datetime64 dtype. Examples -------- >>> is_datetime_or_timedelta_dtype(str) False >>> is_datetime_or_timedelta_dtype(int) False >>> is_datetime_or_timedelta_dtype(np.datetime64) True >>> is_datetime_or_timedelta_dtype(np.timedelta64) True >>> is_datetime_or_timedelta_dtype(np.array(['a', 'b'])) False >>> is_datetime_or_timedelta_dtype(pd.Series([1, 2])) False >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64)) True >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64)) True """ return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64)) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) def _get_next_label(label): dtype = getattr(label, "dtype", type(label)) if isinstance(label, (Timestamp, Timedelta)): dtype = "datetime64" if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype): return label + np.timedelta64(1, "ns") elif is_integer_dtype(dtype): return label + 1 elif is_float_dtype(dtype): return np.nextafter(label, np.infty) else: raise TypeError(f"cannot determine next label for type {repr(type(label))}")
null
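A hedged behavioral sketch of _get_next_label from the snippet above (assumes the helper is in scope and that numpy and pandas are importable):

import numpy as np
from pandas import Timestamp

_get_next_label(5)                        # integers step by 1 -> 6
_get_next_label(1.0)                      # floats step by one ULP -> np.nextafter(1.0, np.inf)
_get_next_label(Timestamp("2020-01-01"))  # datetime-likes step by 1 ns
                                          # -> Timestamp('2020-01-01 00:00:00.000000001')
# Any other label type (e.g. a plain string) raises TypeError.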
173,169
from __future__ import annotations from operator import ( le, lt, ) import textwrap from typing import ( Any, Hashable, Literal, ) import numpy as np from pandas._libs import lib from pandas._libs.interval import ( Interval, IntervalMixin, IntervalTree, ) from pandas._libs.tslibs import ( BaseOffset, Timedelta, Timestamp, to_offset, ) from pandas._typing import ( Dtype, DtypeObj, IntervalClosedType, npt, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import ( Appender, cache_readonly, ) from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import ( find_common_type, infer_dtype_from_scalar, maybe_box_datetimelike, maybe_downcast_numeric, maybe_upcast_numeric_to_64bit, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float, is_float_dtype, is_integer, is_integer_dtype, is_interval_dtype, is_list_like, is_number, is_object_dtype, is_scalar, ) from pandas.core.dtypes.dtypes import IntervalDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.algorithms import unique from pandas.core.arrays.interval import ( IntervalArray, _interval_shared_docs, ) import pandas.core.common as com from pandas.core.indexers import is_valid_positional_slice import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, maybe_extract_name, ) from pandas.core.indexes.datetimes import ( DatetimeIndex, date_range, ) from pandas.core.indexes.extension import ( ExtensionIndex, inherit_names, ) from pandas.core.indexes.multi import MultiIndex from pandas.core.indexes.timedeltas import ( TimedeltaIndex, timedelta_range, ) def is_datetime64tz_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of a DatetimeTZDtype dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. Examples -------- >>> is_datetime64tz_dtype(object) False >>> is_datetime64tz_dtype([1, 2, 3]) False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_datetime64tz_dtype(dtype) True >>> is_datetime64tz_dtype(s) True """ if isinstance(arr_or_dtype, DatetimeTZDtype): # GH#33400 fastpath for dtype object # GH 34986 return True if arr_or_dtype is None: return False return DatetimeTZDtype.is_dtype(arr_or_dtype) def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. 
Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a timedelta64 or datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a timedelta64, or datetime64 dtype. Examples -------- >>> is_datetime_or_timedelta_dtype(str) False >>> is_datetime_or_timedelta_dtype(int) False >>> is_datetime_or_timedelta_dtype(np.datetime64) True >>> is_datetime_or_timedelta_dtype(np.timedelta64) True >>> is_datetime_or_timedelta_dtype(np.array(['a', 'b'])) False >>> is_datetime_or_timedelta_dtype(pd.Series([1, 2])) False >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64)) True >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64)) True """ return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64)) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) def _get_prev_label(label): dtype = getattr(label, "dtype", type(label)) if isinstance(label, (Timestamp, Timedelta)): dtype = "datetime64" if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype): return label - np.timedelta64(1, "ns") elif is_integer_dtype(dtype): return label - 1 elif is_float_dtype(dtype): return np.nextafter(label, -np.infty) else: raise TypeError(f"cannot determine next label for type {repr(type(label))}")
null
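The mirror-image sketch for _get_prev_label from the snippet above, under the same assumptions:

import numpy as np
from pandas import Timedelta

_get_prev_label(5)                    # -> 4
_get_prev_label(1.0)                  # -> np.nextafter(1.0, -np.inf)
_get_prev_label(Timedelta("1 day"))   # -> Timedelta('0 days 23:59:59.999999999')
# A later snippet's IntervalIndex._searchsorted_monotonic uses _get_next_label /
# _get_prev_label to nudge a lookup label just past an open interval endpoint
# before searching the left/right Index monotonically.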
173,170
from __future__ import annotations from operator import ( le, lt, ) import textwrap from typing import ( Any, Hashable, Literal, ) import numpy as np from pandas._libs import lib from pandas._libs.interval import ( Interval, IntervalMixin, IntervalTree, ) from pandas._libs.tslibs import ( BaseOffset, Timedelta, Timestamp, to_offset, ) from pandas._typing import ( Dtype, DtypeObj, IntervalClosedType, npt, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import ( Appender, cache_readonly, ) from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import ( find_common_type, infer_dtype_from_scalar, maybe_box_datetimelike, maybe_downcast_numeric, maybe_upcast_numeric_to_64bit, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float, is_float_dtype, is_integer, is_integer_dtype, is_interval_dtype, is_list_like, is_number, is_object_dtype, is_scalar, ) from pandas.core.dtypes.dtypes import IntervalDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.algorithms import unique from pandas.core.arrays.interval import ( IntervalArray, _interval_shared_docs, ) import pandas.core.common as com from pandas.core.indexers import is_valid_positional_slice import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, maybe_extract_name, ) from pandas.core.indexes.datetimes import ( DatetimeIndex, date_range, ) from pandas.core.indexes.extension import ( ExtensionIndex, inherit_names, ) from pandas.core.indexes.multi import MultiIndex from pandas.core.indexes.timedeltas import ( TimedeltaIndex, timedelta_range, ) The provided code snippet includes necessary dependencies for implementing the `_new_IntervalIndex` function. Write a Python function `def _new_IntervalIndex(cls, d)` to solve the following problem: This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__. Here is the function: def _new_IntervalIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__. """ return cls.from_arrays(**d)
This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__.
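A hedged sketch of the unpickling path this helper supports: IntervalIndex.__reduce__ (shown in the next snippet) returns (_new_IntervalIndex, (type(self), d), None), so a pickle round-trip rebuilds the index through from_arrays(**d). The example assumes _new_IntervalIndex as defined above is in scope:

import pickle
import pandas as pd

idx = pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3], closed="left", name="bins")
restored = pickle.loads(pickle.dumps(idx))
assert restored.equals(idx) and restored.name == "bins"

# Equivalent manual call, mirroring the dict that __reduce__ stores:
d = {"left": idx.left, "right": idx.right, "closed": idx.closed, "name": idx.name}
assert _new_IntervalIndex(pd.IntervalIndex, d).equals(idx)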
173,171
from __future__ import annotations from operator import ( le, lt, ) import textwrap from typing import ( Any, Hashable, Literal, ) import numpy as np from pandas._libs import lib from pandas._libs.interval import ( Interval, IntervalMixin, IntervalTree, ) from pandas._libs.tslibs import ( BaseOffset, Timedelta, Timestamp, to_offset, ) from pandas._typing import ( Dtype, DtypeObj, IntervalClosedType, npt, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import ( Appender, cache_readonly, ) from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import ( find_common_type, infer_dtype_from_scalar, maybe_box_datetimelike, maybe_downcast_numeric, maybe_upcast_numeric_to_64bit, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float, is_float_dtype, is_integer, is_integer_dtype, is_interval_dtype, is_list_like, is_number, is_object_dtype, is_scalar, ) from pandas.core.dtypes.dtypes import IntervalDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.algorithms import unique from pandas.core.arrays.interval import ( IntervalArray, _interval_shared_docs, ) import pandas.core.common as com from pandas.core.indexers import is_valid_positional_slice import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, _index_shared_docs, ensure_index, maybe_extract_name, ) from pandas.core.indexes.datetimes import ( DatetimeIndex, date_range, ) from pandas.core.indexes.extension import ( ExtensionIndex, inherit_names, ) from pandas.core.indexes.multi import MultiIndex from pandas.core.indexes.timedeltas import ( TimedeltaIndex, timedelta_range, ) class IntervalIndex(ExtensionIndex): _typ = "intervalindex" # annotate properties pinned via inherit_names closed: IntervalClosedType is_non_overlapping_monotonic: bool closed_left: bool closed_right: bool open_left: bool open_right: bool _data: IntervalArray _values: IntervalArray _can_hold_strings = False _data_cls = IntervalArray # -------------------------------------------------------------------- # Constructors def __new__( cls, data, closed=None, dtype: Dtype | None = None, copy: bool = False, name: Hashable = None, verify_integrity: bool = True, ) -> IntervalIndex: name = maybe_extract_name(name, data, cls) with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray( data, closed=closed, copy=copy, dtype=dtype, verify_integrity=verify_integrity, ) return cls._simple_new(array, name) _interval_shared_docs["from_breaks"] % { "klass": "IntervalIndex", "name": textwrap.dedent( """ name : str, optional Name of the resulting IntervalIndex.""" ), "examples": textwrap.dedent( """\ Examples -------- >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3]) IntervalIndex([(0, 1], (1, 2], (2, 3]], dtype='interval[int64, right]') """ ), } ) def from_breaks( cls, breaks, closed: IntervalClosedType | None = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray.from_breaks( breaks, closed=closed, copy=copy, dtype=dtype ) return cls._simple_new(array, name=name) _interval_shared_docs["from_arrays"] % { "klass": "IntervalIndex", "name": textwrap.dedent( """ name : str, optional Name of the resulting IntervalIndex.""" ), "examples": textwrap.dedent( """\ Examples -------- >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) IntervalIndex([(0, 1], (1, 2], 
(2, 3]], dtype='interval[int64, right]') """ ), } ) def from_arrays( cls, left, right, closed: IntervalClosedType = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray.from_arrays( left, right, closed, copy=copy, dtype=dtype ) return cls._simple_new(array, name=name) _interval_shared_docs["from_tuples"] % { "klass": "IntervalIndex", "name": textwrap.dedent( """ name : str, optional Name of the resulting IntervalIndex.""" ), "examples": textwrap.dedent( """\ Examples -------- >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]) IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') """ ), } ) def from_tuples( cls, data, closed: IntervalClosedType = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: with rewrite_exception("IntervalArray", cls.__name__): arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype) return cls._simple_new(arr, name=name) # -------------------------------------------------------------------- # error: Return type "IntervalTree" of "_engine" incompatible with return type # "Union[IndexEngine, ExtensionEngine]" in supertype "Index" def _engine(self) -> IntervalTree: # type: ignore[override] # IntervalTree does not supports numpy array unless they are 64 bit left = self._maybe_convert_i8(self.left) left = maybe_upcast_numeric_to_64bit(left) right = self._maybe_convert_i8(self.right) right = maybe_upcast_numeric_to_64bit(right) return IntervalTree(left, right, closed=self.closed) def __contains__(self, key: Any) -> bool: """ return a boolean if this key is IN the index We *only* accept an Interval Parameters ---------- key : Interval Returns ------- bool """ hash(key) if not isinstance(key, Interval): if is_valid_na_for_dtype(key, self.dtype): return self.hasnans return False try: self.get_loc(key) return True except KeyError: return False def _multiindex(self) -> MultiIndex: return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"]) def __reduce__(self): d = { "left": self.left, "right": self.right, "closed": self.closed, "name": self.name, } return _new_IntervalIndex, (type(self), d), None def inferred_type(self) -> str: """Return a string of the type inferred from the values""" return "interval" # Cannot determine type of "memory_usage" def memory_usage(self, deep: bool = False) -> int: # we don't use an explicit engine # so return the bytes here return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep) # IntervalTree doesn't have a is_monotonic_decreasing, so have to override # the Index implementation def is_monotonic_decreasing(self) -> bool: """ Return True if the IntervalIndex is monotonic decreasing (only equal or decreasing values), else False """ return self[::-1].is_monotonic_increasing def is_unique(self) -> bool: """ Return True if the IntervalIndex contains unique elements, else False. """ left = self.left right = self.right if self.isna().sum() > 1: return False if left.is_unique or right.is_unique: return True seen_pairs = set() check_idx = np.where(left.duplicated(keep=False))[0] for idx in check_idx: pair = (left[idx], right[idx]) if pair in seen_pairs: return False seen_pairs.add(pair) return True def is_overlapping(self) -> bool: """ Return True if the IntervalIndex has overlapping intervals, else False. Two intervals overlap if they share a common point, including closed endpoints. 
Intervals that only have an open endpoint in common do not overlap. Returns ------- bool Boolean indicating if the IntervalIndex has overlapping intervals. See Also -------- Interval.overlaps : Check whether two Interval objects overlap. IntervalIndex.overlaps : Check an IntervalIndex elementwise for overlaps. Examples -------- >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)]) >>> index IntervalIndex([(0, 2], (1, 3], (4, 5]], dtype='interval[int64, right]') >>> index.is_overlapping True Intervals that share closed endpoints overlap: >>> index = pd.interval_range(0, 3, closed='both') >>> index IntervalIndex([[0, 1], [1, 2], [2, 3]], dtype='interval[int64, both]') >>> index.is_overlapping True Intervals that only have an open endpoint in common do not overlap: >>> index = pd.interval_range(0, 3, closed='left') >>> index IntervalIndex([[0, 1), [1, 2), [2, 3)], dtype='interval[int64, left]') >>> index.is_overlapping False """ # GH 23309 return self._engine.is_overlapping def _needs_i8_conversion(self, key) -> bool: """ Check if a given key needs i8 conversion. Conversion is necessary for Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An Interval-like requires conversion if its endpoints are one of the aforementioned types. Assumes that any list-like data has already been cast to an Index. Parameters ---------- key : scalar or Index-like The key that should be checked for i8 conversion Returns ------- bool """ if is_interval_dtype(key) or isinstance(key, Interval): return self._needs_i8_conversion(key.left) i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex) return isinstance(key, i8_types) def _maybe_convert_i8(self, key): """ Maybe convert a given key to its equivalent i8 value(s). Used as a preprocessing step prior to IntervalTree queries (self._engine), which expects numeric data. Parameters ---------- key : scalar or list-like The key that should maybe be converted to i8. Returns ------- scalar or list-like The original key if no conversion occurred, int if converted scalar, Index with an int64 dtype if converted list-like. """ if is_list_like(key): key = ensure_index(key) key = maybe_upcast_numeric_to_64bit(key) if not self._needs_i8_conversion(key): return key scalar = is_scalar(key) if is_interval_dtype(key) or isinstance(key, Interval): # convert left/right and reconstruct left = self._maybe_convert_i8(key.left) right = self._maybe_convert_i8(key.right) constructor = Interval if scalar else IntervalIndex.from_arrays # error: "object" not callable return constructor( left, right, closed=self.closed ) # type: ignore[operator] if scalar: # Timestamp/Timedelta key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True) if lib.is_period(key): key_i8 = key.ordinal elif isinstance(key_i8, Timestamp): key_i8 = key_i8._value elif isinstance(key_i8, (np.datetime64, np.timedelta64)): key_i8 = key_i8.view("i8") else: # DatetimeIndex/TimedeltaIndex key_dtype, key_i8 = key.dtype, Index(key.asi8) if key.hasnans: # convert NaT from its i8 value to np.nan so it's not viewed # as a valid value, maybe causing errors (e.g. 
is_overlapping) key_i8 = key_i8.where(~key._isnan) # ensure consistency with IntervalIndex subtype # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any], # ExtensionDtype]" has no attribute "subtype" subtype = self.dtype.subtype # type: ignore[union-attr] if not is_dtype_equal(subtype, key_dtype): raise ValueError( f"Cannot index an IntervalIndex of subtype {subtype} with " f"values of dtype {key_dtype}" ) return key_i8 def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): if not self.is_non_overlapping_monotonic: raise KeyError( "can only get slices from an IntervalIndex if bounds are " "non-overlapping and all monotonic increasing or decreasing" ) if isinstance(label, (IntervalMixin, IntervalIndex)): raise NotImplementedError("Interval objects are not currently supported") # GH 20921: "not is_monotonic_increasing" for the second condition # instead of "is_monotonic_decreasing" to account for single element # indexes being both increasing and decreasing if (side == "left" and self.left.is_monotonic_increasing) or ( side == "right" and not self.left.is_monotonic_increasing ): sub_idx = self.right if self.open_right: label = _get_next_label(label) else: sub_idx = self.left if self.open_left: label = _get_prev_label(label) return sub_idx._searchsorted_monotonic(label, side) # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key) -> int | slice | np.ndarray: """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) >>> index = pd.IntervalIndex([i1, i2]) >>> index.get_loc(1) 0 You can also supply a point inside an interval. >>> index.get_loc(1.5) 1 If a label is in several intervals, you get the locations of all the relevant intervals. >>> i3 = pd.Interval(0, 2) >>> overlapping_index = pd.IntervalIndex([i1, i2, i3]) >>> overlapping_index.get_loc(0.5) array([ True, False, True]) Only exact matches will be returned if an interval is provided. >>> index.get_loc(pd.Interval(0, 1)) 0 """ self._check_indexing_error(key) if isinstance(key, Interval): if self.closed != key.closed: raise KeyError(key) mask = (self.left == key.left) & (self.right == key.right) elif is_valid_na_for_dtype(key, self.dtype): mask = self.isna() else: # assume scalar op_left = le if self.closed_left else lt op_right = le if self.closed_right else lt try: mask = op_left(self.left, key) & op_right(key, self.right) except TypeError as err: # scalar is not comparable to II subtype --> invalid label raise KeyError(key) from err matches = mask.sum() if matches == 0: raise KeyError(key) if matches == 1: return mask.argmax() res = lib.maybe_booleans_to_slice(mask.view("u1")) if isinstance(res, slice) and res.stop is None: # TODO: DO this in maybe_booleans_to_slice? 
res = slice(res.start, len(self), res.step) return res def _get_indexer( self, target: Index, method: str | None = None, limit: int | None = None, tolerance: Any | None = None, ) -> npt.NDArray[np.intp]: if isinstance(target, IntervalIndex): # We only get here with not self.is_overlapping # -> at most one match per interval in target # want exact matches -> need both left/right to match, so defer to # left/right get_indexer, compare elementwise, equality -> match indexer = self._get_indexer_unique_sides(target) elif not is_object_dtype(target.dtype): # homogeneous scalar index: use IntervalTree # we should always have self._should_partial_index(target) here target = self._maybe_convert_i8(target) indexer = self._engine.get_indexer(target.values) else: # heterogeneous scalar index: defer elementwise to get_loc # we should always have self._should_partial_index(target) here return self._get_indexer_pointwise(target)[0] return ensure_platform_int(indexer) def get_indexer_non_unique( self, target: Index ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) if not self._should_compare(target) and not self._should_partial_index(target): # e.g. IntervalIndex with different closed or incompatible subtype # -> no matches return self._get_indexer_non_comparable(target, None, unique=False) elif isinstance(target, IntervalIndex): if self.left.is_unique and self.right.is_unique: # fastpath available even if we don't have self._index_as_unique indexer = self._get_indexer_unique_sides(target) missing = (indexer == -1).nonzero()[0] else: return self._get_indexer_pointwise(target) elif is_object_dtype(target.dtype) or not self._should_partial_index(target): # target might contain intervals: defer elementwise to get_loc return self._get_indexer_pointwise(target) else: # Note: this case behaves differently from other Index subclasses # because IntervalIndex does partial-int indexing target = self._maybe_convert_i8(target) indexer, missing = self._engine.get_indexer_non_unique(target.values) return ensure_platform_int(indexer), ensure_platform_int(missing) def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]: """ _get_indexer specialized to the case where both of our sides are unique. """ # Caller is responsible for checking # `self.left.is_unique and self.right.is_unique` left_indexer = self.left.get_indexer(target.left) right_indexer = self.right.get_indexer(target.right) indexer = np.where(left_indexer == right_indexer, left_indexer, -1) return indexer def _get_indexer_pointwise( self, target: Index ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ pointwise implementation for get_indexer and get_indexer_non_unique. """ indexer, missing = [], [] for i, key in enumerate(target): try: locs = self.get_loc(key) if isinstance(locs, slice): # Only needed for get_indexer_non_unique locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp") elif lib.is_integer(locs): locs = np.array(locs, ndmin=1) else: # otherwise we have ndarray[bool] locs = np.where(locs)[0] except KeyError: missing.append(i) locs = np.array([-1]) except InvalidIndexError: # i.e. non-scalar key e.g. a tuple. 
# see test_append_different_columns_types_raises missing.append(i) locs = np.array([-1]) indexer.append(locs) indexer = np.concatenate(indexer) return ensure_platform_int(indexer), ensure_platform_int(missing) def _index_as_unique(self) -> bool: return not self.is_overlapping and self._engine._na_count < 2 _requires_unique_msg = ( "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique" ) def _convert_slice_indexer(self, key: slice, kind: str): if not (key.step is None or key.step == 1): # GH#31658 if label-based, we require step == 1, # if positional, we disallow float start/stop msg = "label-based slicing with step!=1 is not supported for IntervalIndex" if kind == "loc": raise ValueError(msg) if kind == "getitem": if not is_valid_positional_slice(key): # i.e. this cannot be interpreted as a positional slice raise ValueError(msg) return super()._convert_slice_indexer(key, kind) def _should_fallback_to_positional(self) -> bool: # integer lookups in Series.__getitem__ are unambiguously # positional in this case # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any], # ExtensionDtype]" has no attribute "subtype" return self.dtype.subtype.kind in ["m", "M"] # type: ignore[union-attr] def _maybe_cast_slice_bound(self, label, side: str): return getattr(self, side)._maybe_cast_slice_bound(label, side) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: if not isinstance(dtype, IntervalDtype): return False common_subtype = find_common_type([self.dtype, dtype]) return not is_object_dtype(common_subtype) # -------------------------------------------------------------------- def left(self) -> Index: return Index(self._data.left, copy=False) def right(self) -> Index: return Index(self._data.right, copy=False) def mid(self) -> Index: return Index(self._data.mid, copy=False) def length(self) -> Index: return Index(self._data.length, copy=False) # -------------------------------------------------------------------- # Rendering Methods # __repr__ associated methods are based on MultiIndex def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: # matches base class except for whitespace padding return header + list(self._format_native_types(na_rep=na_rep)) def _format_native_types( self, *, na_rep: str = "NaN", quoting=None, **kwargs ) -> npt.NDArray[np.object_]: # GH 28210: use base method but with different default na_rep return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs) def _format_data(self, name=None) -> str: # TODO: integrate with categorical and make generic # name argument is unused here; just for compat with base / categorical return f"{self._data._format_data()},{self._format_space()}" # -------------------------------------------------------------------- # Set Operations def _intersection(self, other, sort): """ intersection specialized to the case with matching dtypes. """ # For IntervalIndex we also know other.closed == self.closed if self.left.is_unique and self.right.is_unique: taken = self._intersection_unique(other) elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1: # Swap other/self if other is unique and self does not have # multiple NaNs taken = other._intersection_unique(self) else: # duplicates taken = self._intersection_non_unique(other) if sort is None: taken = taken.sort_values() return taken def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex: """ Used when the IntervalIndex does not have any common endpoint, no matter left or right. 
Return the intersection with another IntervalIndex. Parameters ---------- other : IntervalIndex Returns ------- IntervalIndex """ # Note: this is much more performant than super()._intersection(other) lindexer = self.left.get_indexer(other.left) rindexer = self.right.get_indexer(other.right) match = (lindexer == rindexer) & (lindexer != -1) indexer = lindexer.take(match.nonzero()[0]) indexer = unique(indexer) return self.take(indexer) def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex: """ Used when the IntervalIndex does have some common endpoints, on either sides. Return the intersection with another IntervalIndex. Parameters ---------- other : IntervalIndex Returns ------- IntervalIndex """ # Note: this is about 3.25x faster than super()._intersection(other) # in IntervalIndexMethod.time_intersection_both_duplicate(1000) mask = np.zeros(len(self), dtype=bool) if self.hasnans and other.hasnans: first_nan_loc = np.arange(len(self))[self.isna()][0] mask[first_nan_loc] = True other_tups = set(zip(other.left, other.right)) for i, tup in enumerate(zip(self.left, self.right)): if tup in other_tups: mask[i] = True return self[mask] # -------------------------------------------------------------------- def _get_engine_target(self) -> np.ndarray: # Note: we _could_ use libjoin functions by either casting to object # dtype or constructing tuples (faster than constructing Intervals) # but the libjoin fastpaths are no longer fast in these cases. raise NotImplementedError( "IntervalIndex does not use libjoin fastpaths or pass values to " "IndexEngine objects" ) def _from_join_target(self, result): raise NotImplementedError("IntervalIndex does not use libjoin fastpaths") # TODO: arithmetic operations def _is_valid_endpoint(endpoint) -> bool: """ Helper for interval_range to check if start/end are valid types. """ return any( [ is_number(endpoint), isinstance(endpoint, Timestamp), isinstance(endpoint, Timedelta), endpoint is None, ] ) def _is_type_compatible(a, b) -> bool: """ Helper for interval_range to check type compat of start/end/freq. """ is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset)) is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset)) return ( (is_number(a) and is_number(b)) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) or com.any_none(a, b) ) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]] def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar: """ Cast scalar to Timestamp or Timedelta if scalar is datetime-like and dtype is not object. Parameters ---------- value : scalar dtype : Dtype, optional Returns ------- scalar """ if dtype == _dtype_obj: pass elif isinstance(value, (np.datetime64, dt.datetime)): value = Timestamp(value) elif isinstance(value, (np.timedelta64, dt.timedelta)): value = Timedelta(value) return value def maybe_downcast_numeric( result: np.ndarray, dtype: np.dtype, do_round: bool = False ) -> np.ndarray: ... def maybe_downcast_numeric( result: ExtensionArray, dtype: DtypeObj, do_round: bool = False ) -> ArrayLike: ... 
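# Editor's note (hedged): the two bare signatures above read like typing @overload
# stubs whose decorators were dropped during extraction; the full implementation of
# maybe_downcast_numeric follows below. A usage sketch, assuming numpy is importable
# and the function is reached through pandas' internal module
# (from pandas.core.dtypes.cast import maybe_downcast_numeric):
#
#   import numpy as np
#   maybe_downcast_numeric(np.array([1.0, 2.0, 3.0]), np.dtype("int64"))
#   # -> array([1, 2, 3]); every float round-trips losslessly, so the downcast is kept
#   maybe_downcast_numeric(np.array([1.5, 2.0]), np.dtype("int64"))
#   # -> the original float64 array; 1.5 cannot be represented exactly, so no downcast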
def maybe_downcast_numeric( result: ArrayLike, dtype: DtypeObj, do_round: bool = False ) -> ArrayLike: """ Subset of maybe_downcast_to_dtype restricted to numeric dtypes. Parameters ---------- result : ndarray or ExtensionArray dtype : np.dtype or ExtensionDtype do_round : bool Returns ------- ndarray or ExtensionArray """ if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype): # e.g. SparseDtype has no itemsize attr return result def trans(x): if do_round: return x.round() return x if dtype.kind == result.dtype.kind: # don't allow upcasts here (except if empty) if result.dtype.itemsize <= dtype.itemsize and result.size: return result if is_bool_dtype(dtype) or is_integer_dtype(dtype): if not result.size: # if we don't have any elements, just astype it return trans(result).astype(dtype) # do a test on the first element, if it fails then we are done r = result.ravel() arr = np.array([r[0]]) if isna(arr).any(): # if we have any nulls, then we are done return result elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)): # a comparable, e.g. a Decimal may slip in here return result if ( issubclass(result.dtype.type, (np.object_, np.number)) and notna(result).all() ): new_result = trans(result).astype(dtype) if new_result.dtype.kind == "O" or result.dtype.kind == "O": # np.allclose may raise TypeError on object-dtype if (new_result == result).all(): return new_result else: if np.allclose(new_result, result, rtol=0): return new_result elif ( issubclass(dtype.type, np.floating) and not is_bool_dtype(result.dtype) and not is_string_dtype(result.dtype) ): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "overflow encountered in cast", RuntimeWarning ) new_result = result.astype(dtype) # Adjust tolerances based on floating point size size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16} atol = size_tols.get(new_result.dtype.itemsize, 0.0) # Check downcast float values are still equal within 7 digits when # converting from float64 to float32 if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol): return new_result elif dtype.kind == result.dtype.kind == "c": new_result = result.astype(dtype) if np.array_equal(new_result, result, equal_nan=True): # TODO: use tolerance like we do for float? return new_result return result ) [ "tz", "tzinfo", "dtype", "to_pydatetime", "_format_native_types", "date", "time", "timetz", "std", ] ) class DatetimeIndex(DatetimeTimedeltaMixin): """ Immutable ndarray-like of datetime64 data. Represented internally as int64, and which can be boxed to Timestamp objects that are subclasses of datetime and carry metadata. .. versionchanged:: 2.0.0 The various numeric date/time attributes (:attr:`~DatetimeIndex.day`, :attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype ``int32``. Previously they had dtype ``int64``. Parameters ---------- data : array-like (1-dimensional) Datetime-like data to construct index with. freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string 'infer' can be passed in order to set the frequency of the index as the inferred frequency upon creation. tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str Set the Timezone of the data. normalize : bool, default False Normalize start/end dates to midnight before generating date range. closed : {'left', 'right'}, optional Set whether to include `start` and `end` that are on the boundary. The default includes boundary points on either end. 
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False signifies a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. dayfirst : bool, default False If True, parse dates in `data` with the day first order. yearfirst : bool, default False If True parse dates in `data` with the year first order. dtype : numpy.dtype or DatetimeTZDtype or str, default None Note that the only NumPy dtype allowed is ‘datetime64[ns]’. copy : bool, default False Make a copy of input ndarray. name : label, default None Name to be stored in the index. Attributes ---------- year month day hour minute second microsecond nanosecond date time timetz dayofyear day_of_year weekofyear week dayofweek day_of_week weekday quarter tz freq freqstr is_month_start is_month_end is_quarter_start is_quarter_end is_year_start is_year_end is_leap_year inferred_freq Methods ------- normalize strftime snap tz_convert tz_localize round floor ceil to_period to_pydatetime to_series to_frame month_name day_name mean std See Also -------- Index : The base pandas Index type. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. to_datetime : Convert argument to datetime. date_range : Create a fixed-frequency DatetimeIndex. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. 
""" _typ = "datetimeindex" _data_cls = DatetimeArray _supports_partial_string_indexing = True def _engine_type(self) -> type[libindex.DatetimeEngine]: return libindex.DatetimeEngine _data: DatetimeArray tz: dt.tzinfo | None # -------------------------------------------------------------------- # methods that dispatch to DatetimeArray and wrap result def strftime(self, date_format) -> Index: arr = self._data.strftime(date_format) return Index(arr, name=self.name, dtype=object) def tz_convert(self, tz) -> DatetimeIndex: arr = self._data.tz_convert(tz) return type(self)._simple_new(arr, name=self.name, refs=self._references) def tz_localize( self, tz, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> DatetimeIndex: arr = self._data.tz_localize(tz, ambiguous, nonexistent) return type(self)._simple_new(arr, name=self.name) def to_period(self, freq=None) -> PeriodIndex: from pandas.core.indexes.api import PeriodIndex arr = self._data.to_period(freq) return PeriodIndex._simple_new(arr, name=self.name) def to_julian_date(self) -> Index: arr = self._data.to_julian_date() return Index._simple_new(arr, name=self.name) def isocalendar(self) -> DataFrame: df = self._data.isocalendar() return df.set_index(self) def _resolution_obj(self) -> Resolution: return self._data._resolution_obj # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, freq: Frequency | lib.NoDefault = lib.no_default, tz=lib.no_default, normalize: bool = False, closed=None, ambiguous: TimeAmbiguous = "raise", dayfirst: bool = False, yearfirst: bool = False, dtype: Dtype | None = None, copy: bool = False, name: Hashable = None, ) -> DatetimeIndex: if is_scalar(data): cls._raise_scalar_data_error(data) # - Cases checked above all return/raise before reaching here - # name = maybe_extract_name(name, data, cls) if ( isinstance(data, DatetimeArray) and freq is lib.no_default and tz is lib.no_default and dtype is None ): # fastpath, similar logic in TimedeltaIndex.__new__; # Note in this particular case we retain non-nano. if copy: data = data.copy() return cls._simple_new(data, name=name) dtarr = DatetimeArray._from_sequence_not_strict( data, dtype=dtype, copy=copy, tz=tz, freq=freq, dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous, ) refs = None if not copy and isinstance(data, (Index, ABCSeries)): refs = data._references subarr = cls._simple_new(dtarr, name=name, refs=refs) return subarr # -------------------------------------------------------------------- def _is_dates_only(self) -> bool: """ Return a boolean if we are only dates (and don't have a timezone) Returns ------- bool """ from pandas.io.formats.format import is_dates_only # error: Argument 1 to "is_dates_only" has incompatible type # "Union[ExtensionArray, ndarray]"; expected "Union[ndarray, # DatetimeArray, Index, DatetimeIndex]" return self.tz is None and is_dates_only(self._values) # type: ignore[arg-type] def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_DatetimeIndex, (type(self), d), None def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? 
""" if self.tz is not None: # If we have tz, we can compare to tzaware return is_datetime64tz_dtype(dtype) # if we dont have tz, we can only compare to tznaive return is_datetime64_dtype(dtype) # -------------------------------------------------------------------- # Rendering Methods def _formatter_func(self): from pandas.io.formats.format import get_format_datetime64 formatter = get_format_datetime64(is_dates_only_=self._is_dates_only) return lambda x: f"'{formatter(x)}'" # -------------------------------------------------------------------- # Set Operation Methods def _can_range_setop(self, other) -> bool: # GH 46702: If self or other have non-UTC tzs, DST transitions prevent # range representation due to no singular step if ( self.tz is not None and not timezones.is_utc(self.tz) and not timezones.is_fixed_offset(self.tz) ): return False if ( other.tz is not None and not timezones.is_utc(other.tz) and not timezones.is_fixed_offset(other.tz) ): return False return super()._can_range_setop(other) # -------------------------------------------------------------------- def _get_time_micros(self) -> npt.NDArray[np.int64]: """ Return the number of microseconds since midnight. Returns ------- ndarray[int64_t] """ values = self._data._local_timestamps() ppd = periods_per_day(self._data._creso) frac = values % ppd if self.unit == "ns": micros = frac // 1000 elif self.unit == "us": micros = frac elif self.unit == "ms": micros = frac * 1000 elif self.unit == "s": micros = frac * 1_000_000 else: # pragma: no cover raise NotImplementedError(self.unit) micros[self._isnan] = -1 return micros def snap(self, freq: Frequency = "S") -> DatetimeIndex: """ Snap time stamps to nearest occurring frequency. Returns ------- DatetimeIndex """ # Superdumb, punting on any optimizing freq = to_offset(freq) dta = self._data.copy() for i, v in enumerate(self): s = v if not freq.is_on_offset(s): t0 = freq.rollback(s) t1 = freq.rollforward(s) if abs(s - t0) < abs(t1 - s): s = t0 else: s = t1 dta[i] = s return DatetimeIndex._simple_new(dta, name=self.name) # -------------------------------------------------------------------- # Indexing Methods def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime): """ Calculate datetime bounds for parsed time string and its resolution. Parameters ---------- reso : Resolution Resolution provided by parsed string. parsed : datetime Datetime from parsed string. Returns ------- lower, upper: pd.Timestamp """ per = Period(parsed, freq=reso.attr_abbrev) start, end = per.start_time, per.end_time # GH 24076 # If an incoming date string contained a UTC offset, need to localize # the parsed date to this offset first before aligning with the index's # timezone start = start.tz_localize(parsed.tzinfo) end = end.tz_localize(parsed.tzinfo) if parsed.tzinfo is not None: if self.tz is None: raise ValueError( "The index must be timezone aware when indexing " "with a date string with a UTC offset" ) # The flipped case with parsed.tz is None and self.tz is not None # is ruled out bc parsed and reso are produced by _parse_with_reso, # which localizes parsed. 
return start, end def _parse_with_reso(self, label: str): parsed, reso = super()._parse_with_reso(label) parsed = Timestamp(parsed) if self.tz is not None and parsed.tzinfo is None: # we special-case timezone-naive strings and timezone-aware # DatetimeIndex # https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081 parsed = parsed.tz_localize(self.tz) return parsed, reso def _disallow_mismatched_indexing(self, key) -> None: """ Check for mismatched-tzawareness indexing and re-raise as KeyError. """ # we get here with isinstance(key, self._data._recognized_scalars) try: # GH#36148 self._data._assert_tzawareness_compat(key) except TypeError as err: raise KeyError(key) from err def get_loc(self, key): """ Get integer location for requested label Returns ------- loc : int """ self._check_indexing_error(key) orig_key = key if is_valid_na_for_dtype(key, self.dtype): key = NaT if isinstance(key, self._data._recognized_scalars): # needed to localize naive datetimes self._disallow_mismatched_indexing(key) key = Timestamp(key) elif isinstance(key, str): try: parsed, reso = self._parse_with_reso(key) except (ValueError, pytz.NonExistentTimeError) as err: raise KeyError(key) from err self._disallow_mismatched_indexing(parsed) if self._can_partial_date_slice(reso): try: return self._partial_date_slice(reso, parsed) except KeyError as err: raise KeyError(key) from err key = parsed elif isinstance(key, dt.timedelta): # GH#20464 raise TypeError( f"Cannot index {type(self).__name__} with {type(key).__name__}" ) elif isinstance(key, dt.time): return self.indexer_at_time(key) else: # unrecognized type raise KeyError(key) try: return Index.get_loc(self, key) except KeyError as err: raise KeyError(orig_key) from err def _maybe_cast_slice_bound(self, label, side: str): # GH#42855 handle date here instead of get_slice_bound if isinstance(label, dt.date) and not isinstance(label, dt.datetime): # Pandas supports slicing with dates, treated as datetimes at midnight. # https://github.com/pandas-dev/pandas/issues/31501 label = Timestamp(label).to_pydatetime() label = super()._maybe_cast_slice_bound(label, side) self._data._assert_tzawareness_compat(label) return Timestamp(label) def slice_indexer(self, start=None, end=None, step=None): """ Return indexer for specified label slice. Index.slice_indexer, customized to handle time slicing. In addition to functionality provided by Index.slice_indexer, does the following: - if both `start` and `end` are instances of `datetime.time`, it invokes `indexer_between_time` - if `start` and `end` are both either string or None perform value-based selection in non-monotonic cases. """ # For historical reasons DatetimeIndex supports slices between two # instances of datetime.time as if it were applying a slice mask to # an array of (self.hour, self.minute, self.seconds, self.microsecond). 
if isinstance(start, dt.time) and isinstance(end, dt.time): if step is not None and step != 1: raise ValueError("Must have step size of 1 with time slices") return self.indexer_between_time(start, end) if isinstance(start, dt.time) or isinstance(end, dt.time): raise KeyError("Cannot mix time and non-time slice keys") def check_str_or_none(point) -> bool: return point is not None and not isinstance(point, str) # GH#33146 if start and end are combinations of str and None and Index is not # monotonic, we can not use Index.slice_indexer because it does not honor the # actual elements, is only searching for start and end if ( check_str_or_none(start) or check_str_or_none(end) or self.is_monotonic_increasing ): return Index.slice_indexer(self, start, end, step) mask = np.array(True) raise_mask = np.array(True) if start is not None: start_casted = self._maybe_cast_slice_bound(start, "left") mask = start_casted <= self raise_mask = start_casted == self if end is not None: end_casted = self._maybe_cast_slice_bound(end, "right") mask = (self <= end_casted) & mask raise_mask = (end_casted == self) | raise_mask if not raise_mask.any(): raise KeyError( "Value based partial slicing on non-monotonic DatetimeIndexes " "with non-existing keys is not allowed.", ) indexer = mask.nonzero()[0][::step] if len(indexer) == len(self): return slice(None) else: return indexer # -------------------------------------------------------------------- def inferred_type(self) -> str: # b/c datetime is represented as microseconds since the epoch, make # sure we can't have ambiguous indexing return "datetime64" def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]: """ Return index locations of values at particular time of day. Parameters ---------- time : datetime.time or str Time passed in either as object (datetime.time) or as string in appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"). Returns ------- np.ndarray[np.intp] See Also -------- indexer_between_time : Get index locations of values between particular times of day. DataFrame.at_time : Select values at particular time of day. """ if asof: raise NotImplementedError("'asof' argument is not supported") if isinstance(time, str): from dateutil.parser import parse time = parse(time).time() if time.tzinfo: if self.tz is None: raise ValueError("Index must be timezone aware.") time_micros = self.tz_convert(time.tzinfo)._get_time_micros() else: time_micros = self._get_time_micros() micros = _time_to_micros(time) return (time_micros == micros).nonzero()[0] def indexer_between_time( self, start_time, end_time, include_start: bool = True, include_end: bool = True ) -> npt.NDArray[np.intp]: """ Return index locations of values between particular times of day. Parameters ---------- start_time, end_time : datetime.time, str Time passed either as object (datetime.time) or as string in appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p"). include_start : bool, default True include_end : bool, default True Returns ------- np.ndarray[np.intp] See Also -------- indexer_at_time : Get index locations of values at particular time of day. DataFrame.between_time : Select values between particular times of day. 
""" start_time = to_time(start_time) end_time = to_time(end_time) time_micros = self._get_time_micros() start_micros = _time_to_micros(start_time) end_micros = _time_to_micros(end_time) if include_start and include_end: lop = rop = operator.le elif include_start: lop = operator.le rop = operator.lt elif include_end: lop = operator.lt rop = operator.le else: lop = rop = operator.lt if start_time <= end_time: join_op = operator.and_ else: join_op = operator.or_ mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros)) return mask.nonzero()[0] def date_range( start=None, end=None, periods=None, freq=None, tz=None, normalize: bool = False, name: Hashable = None, inclusive: IntervalClosedType = "both", *, unit: str | None = None, **kwargs, ) -> DatetimeIndex: """ Return a fixed frequency DatetimeIndex. Returns the range of equally spaced time points (where the difference between any two adjacent points is specified by the given frequency) such that they all satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp., the first and last time points in that range that fall on the boundary of ``freq`` (if given as a frequency string) or that are valid for ``freq`` (if given as a :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``, ``end``, or ``freq`` is *not* specified, this missing parameter can be computed given ``periods``, the number of timesteps in the range. See the note below.) Parameters ---------- start : str or datetime-like, optional Left bound for generating dates. end : str or datetime-like, optional Right bound for generating dates. periods : int, optional Number of periods to generate. freq : str, datetime.timedelta, or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H'. See :ref:`here <timeseries.offset_aliases>` for a list of frequency aliases. tz : str or tzinfo, optional Time zone name for returning localized DatetimeIndex, for example 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is timezone-naive unless timezone-aware datetime-likes are passed. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 unit : str, default None Specify the desired resolution of the result. .. versionadded:: 2.0.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex See Also -------- DatetimeIndex : An immutable container for datetimes. timedelta_range : Return a fixed frequency TimedeltaIndex. period_range : Return a fixed frequency PeriodIndex. interval_range : Return a fixed frequency IntervalIndex. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``DatetimeIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- **Specifying the values** The next four examples generate the same `DatetimeIndex`, but vary the combination of `start`, `end` and `periods`. Specify `start` and `end`, with the default daily frequency. 
>>> pd.date_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify timezone-aware `start` and `end`, with the default daily frequency. >>> pd.date_range( ... start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"), ... end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"), ... ) DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00', '2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00', '2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00', '2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'], dtype='datetime64[ns, Europe/Berlin]', freq='D') Specify `start` and `periods`, the number of periods (days). >>> pd.date_range(start='1/1/2018', periods=8) DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify `end` and `periods`, the number of periods (days). >>> pd.date_range(end='1/1/2018', periods=8) DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28', '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'], dtype='datetime64[ns]', freq='D') Specify `start`, `end`, and `periods`; the frequency is generated automatically (linearly spaced). >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3) DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00', '2018-04-27 00:00:00'], dtype='datetime64[ns]', freq=None) **Other Parameters** Changed the `freq` (frequency) to ``'M'`` (month end frequency). >>> pd.date_range(start='1/1/2018', periods=5, freq='M') DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30', '2018-05-31'], dtype='datetime64[ns]', freq='M') Multiples are allowed >>> pd.date_range(start='1/1/2018', periods=5, freq='3M') DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3M') `freq` can also be specified as an Offset object. >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)) DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3M') Specify `tz` to set the timezone. >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo') DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00', '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00', '2018-01-05 00:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='D') `inclusive` controls whether to include `start` and `end` that are on the boundary. The default, "both", includes boundary points on either end. >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both") DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') Use ``inclusive='left'`` to exclude `end` if it falls on the boundary. >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left') DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq='D') Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and similarly ``inclusive='neither'`` will exclude both `start` and `end`. 
>>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right') DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') **Specify a unit** >>> pd.date_range(start="2017-01-01", periods=10, freq="100AS", unit="s") DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01', '2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01', '2817-01-01', '2917-01-01'], dtype='datetime64[s]', freq='100AS-JAN') """ if freq is None and com.any_none(periods, start, end): freq = "D" dtarr = DatetimeArray._generate_range( start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, inclusive=inclusive, unit=unit, **kwargs, ) return DatetimeIndex._simple_new(dtarr, name=name) ["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"] ) [ "components", "to_pytimedelta", "sum", "std", "median", "_format_native_types", ], ) class TimedeltaIndex(DatetimeTimedeltaMixin): """ Immutable Index of timedelta64 data. Represented internally as int64, and scalars returned Timedelta objects. Parameters ---------- data : array-like (1-dimensional), optional Optional timedelta-like data to construct index with. unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional Which is an integer/float number. freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string 'infer' can be passed in order to set the frequency of the index as the inferred frequency upon creation. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. Attributes ---------- days seconds microseconds nanoseconds components inferred_freq Methods ------- to_pytimedelta to_series round floor ceil to_frame mean See Also -------- Index : The base pandas Index type. Timedelta : Represents a duration between two dates or times. DatetimeIndex : Index of datetime64 data. PeriodIndex : Index of Period data. timedelta_range : Create a fixed-frequency TimedeltaIndex. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. """ _typ = "timedeltaindex" _data_cls = TimedeltaArray def _engine_type(self) -> type[libindex.TimedeltaEngine]: return libindex.TimedeltaEngine _data: TimedeltaArray # Use base class method instead of DatetimeTimedeltaMixin._get_string_slice _get_string_slice = Index._get_string_slice # error: Signature of "_resolution_obj" incompatible with supertype # "DatetimeIndexOpsMixin" def _resolution_obj(self) -> Resolution | None: # type: ignore[override] return self._data._resolution_obj # ------------------------------------------------------------------- # Constructors def __new__( cls, data=None, unit=None, freq=lib.no_default, closed=None, dtype=None, copy: bool = False, name=None, ): name = maybe_extract_name(name, data, cls) if is_scalar(data): cls._raise_scalar_data_error(data) if unit in {"Y", "y", "M"}: raise ValueError( "Units 'M', 'Y', and 'y' are no longer supported, as they do not " "represent unambiguous timedelta values durations." 
) if ( isinstance(data, TimedeltaArray) and freq is lib.no_default and (dtype is None or is_dtype_equal(dtype, data.dtype)) ): if copy: data = data.copy() return cls._simple_new(data, name=name) if ( isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None and (dtype is None or is_dtype_equal(dtype, data.dtype)) ): if copy: return data.copy() else: return data._view() # - Cases checked above all return/raise before reaching here - # tdarr = TimedeltaArray._from_sequence_not_strict( data, freq=freq, unit=unit, dtype=dtype, copy=copy ) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references return cls._simple_new(tdarr, name=name, refs=refs) # ------------------------------------------------------------------- def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ return is_timedelta64_dtype(dtype) # aka self._data._is_recognized_dtype # ------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location for requested label Returns ------- loc : int, slice, or ndarray[int] """ self._check_indexing_error(key) try: key = self._data._validate_scalar(key, unbox=False) except TypeError as err: raise KeyError(key) from err return Index.get_loc(self, key) def _parse_with_reso(self, label: str): # the "with_reso" is a no-op for TimedeltaIndex parsed = Timedelta(label) return parsed, None def _parsed_string_to_bounds(self, reso, parsed: Timedelta): # reso is unused, included to match signature of DTI/PI lbound = parsed.round(parsed.resolution_string) rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns") return lbound, rbound # ------------------------------------------------------------------- def inferred_type(self) -> str: return "timedelta64" def timedelta_range( start=None, end=None, periods: int | None = None, freq=None, name=None, closed=None, *, unit: str | None = None, ) -> TimedeltaIndex: """ Return a fixed frequency TimedeltaIndex with day as the default. Parameters ---------- start : str or timedelta-like, default None Left bound for generating timedeltas. end : str or timedelta-like, default None Right bound for generating timedeltas. periods : int, default None Number of periods to generate. freq : str or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H'. name : str, default None Name of the resulting TimedeltaIndex. closed : str, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None). unit : str, default None Specify the desired resolution of the result. .. versionadded:: 2.0.0 Returns ------- TimedeltaIndex Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.timedelta_range(start='1 day', periods=4) TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``closed`` parameter specifies which endpoint is included. The default behavior is to include both endpoints. 
>>> pd.timedelta_range(start='1 day', periods=4, closed='right') TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``freq`` parameter specifies the frequency of the TimedeltaIndex. Only fixed frequencies can be passed, non-fixed frequencies such as 'M' (month end) will raise. >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H') TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', '5 days 00:00:00'], dtype='timedelta64[ns]', freq=None) **Specify a unit** >>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s") TimedeltaIndex(['1 days 00:00:00', '100001 days 00:00:00', '200001 days 00:00:00'], dtype='timedelta64[s]', freq='100000D') """ if freq is None and com.any_none(periods, start, end): freq = "D" freq, _ = dtl.maybe_infer_freq(freq) tdarr = TimedeltaArray._generate_range( start, end, periods, freq, closed=closed, unit=unit ) return TimedeltaIndex._simple_new(tdarr, name=name) The provided code snippet includes necessary dependencies for implementing the `interval_range` function. Write a Python function `def interval_range( start=None, end=None, periods=None, freq=None, name: Hashable = None, closed: IntervalClosedType = "right", ) -> IntervalIndex` to solve the following problem: Return a fixed frequency IntervalIndex. Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals. end : numeric or datetime-like, default None Right bound for generating intervals. periods : int, default None Number of periods to generate. freq : numeric, str, datetime.timedelta, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], dtype='interval[int64, right]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], dtype='interval[datetime64[ns], right]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. 
For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], dtype='interval[datetime64[ns], right]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]') Here is the function: def interval_range( start=None, end=None, periods=None, freq=None, name: Hashable = None, closed: IntervalClosedType = "right", ) -> IntervalIndex: """ Return a fixed frequency IntervalIndex. Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals. end : numeric or datetime-like, default None Right bound for generating intervals. periods : int, default None Number of periods to generate. freq : numeric, str, datetime.timedelta, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], dtype='interval[int64, right]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], dtype='interval[datetime64[ns], right]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... 
periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], dtype='interval[datetime64[ns], right]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]') """ start = maybe_box_datetimelike(start) end = maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com.any_none(periods, start, end): freq = 1 if is_number(endpoint) else "D" if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( "Of the four parameters: start, end, periods, and " "freq, exactly three must be specified" ) if not _is_valid_endpoint(start): raise ValueError(f"start must be numeric or datetime-like, got {start}") if not _is_valid_endpoint(end): raise ValueError(f"end must be numeric or datetime-like, got {end}") if is_float(periods): periods = int(periods) elif not is_integer(periods) and periods is not None: raise TypeError(f"periods must be a number, got {periods}") if freq is not None and not is_number(freq): try: freq = to_offset(freq) except ValueError as err: raise ValueError( f"freq must be numeric or convertible to DateOffset, got {freq}" ) from err # verify type compatibility if not all( [ _is_type_compatible(start, end), _is_type_compatible(start, freq), _is_type_compatible(end, freq), ] ): raise TypeError("start, end, freq need to be type compatible") # +1 to convert interval count to breaks count (n breaks = n-1 intervals) if periods is not None: periods += 1 breaks: np.ndarray | TimedeltaIndex | DatetimeIndex if is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) if com.all_not_none(start, end, freq): end -= (end - start) % freq # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 elif start is None: start = end - (periods - 1) * freq elif end is None: end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com.not_none(start, end, freq)): # np.linspace always produces float output # error: Argument 1 to "maybe_downcast_numeric" has incompatible type # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]"; # expected "ndarray[Any, Any]" [ breaks = maybe_downcast_numeric( breaks, # type: ignore[arg-type] np.dtype("int64"), ) else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): breaks = date_range(start=start, end=end, periods=periods, freq=freq) else: breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
Return a fixed frequency IntervalIndex. Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals. end : numeric or datetime-like, default None Right bound for generating intervals. periods : int, default None Number of periods to generate. freq : numeric, str, datetime.timedelta, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], dtype='interval[int64, right]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], dtype='interval[datetime64[ns], right]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], dtype='interval[datetime64[ns], right]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]')
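The numeric branch of the implementation above is the non-obvious part: whichever of ``periods``/``start``/``end`` is missing is derived from the other two plus ``freq``, and the breaks (always one more than the number of intervals) are laid out with ``np.linspace``. A minimal standalone sketch of that logic, assuming numeric inputs and exactly three of the four parameters supplied; the function name ``numeric_interval_breaks`` is hypothetical and this is not the pandas implementation itself:

import numpy as np

def numeric_interval_breaks(start=None, end=None, periods=None, freq=1):
    # Force consistency between start/end/freq: drop the tail of [start, end]
    # that freq cannot reach exactly (mirrors "end -= (end - start) % freq").
    if start is not None and end is not None and freq is not None:
        end -= (end - start) % freq
    # Fill in whichever of periods/start/end was left unspecified.
    if periods is None:
        periods = int((end - start) // freq)
    elif start is None:
        start = end - periods * freq
    elif end is None:
        end = start + periods * freq
    # n intervals need n + 1 equally spaced breaks.
    return np.linspace(start, end, periods + 1)

print(numeric_interval_breaks(start=0, end=6, freq=1.5))  # breaks 0.0, 1.5, 3.0, 4.5, 6.0
print(numeric_interval_breaks(end=5, periods=4))          # breaks 1.0, 2.0, 3.0, 4.0, 5.0

Feeding breaks like these to ``IntervalIndex.from_breaks`` (with the requested ``closed`` side) reproduces the doctest output shown above; datetime-like endpoints instead delegate to ``date_range``/``timedelta_range`` to build the breaks.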
173,172
from __future__ import annotations from typing import ( TYPE_CHECKING, Callable, TypeVar, ) import numpy as np from pandas._typing import ( ArrayLike, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.indexes.base import Index _ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex") def _inherit_from_data( name: str, delegate: type, cache: bool = False, wrap: bool = False ): """ Make an alias for a method of the underlying ExtensionArray. Parameters ---------- name : str Name of an attribute the class should inherit from its EA parent. delegate : class cache : bool, default False Whether to convert wrapped properties into cache_readonly wrap : bool, default False Whether to wrap the inherited result in an Index. Returns ------- attribute, method, property, or cache_readonly """ attr = getattr(delegate, name) if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor": # getset_descriptor i.e. property defined in cython class if cache: def cached(self): return getattr(self._data, name) cached.__name__ = name cached.__doc__ = attr.__doc__ method = cache_readonly(cached) else: def fget(self): result = getattr(self._data, name) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name) return result def fset(self, value) -> None: setattr(self._data, name, value) fget.__name__ = name fget.__doc__ = attr.__doc__ method = property(fget, fset) elif not callable(attr): # just a normal attribute, no wrapping method = attr else: # error: Incompatible redefinition (redefinition with type "Callable[[Any, # VarArg(Any), KwArg(Any)], Any]", original type "property") def method(self, *args, **kwargs): # type: ignore[misc] if "inplace" in kwargs: raise ValueError(f"cannot use inplace with {type(self).__name__}") result = attr(self._data, *args, **kwargs) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name) return result # error: "property" has no attribute "__name__" method.__name__ = name # type: ignore[attr-defined] method.__doc__ = attr.__doc__ return method class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) The provided code snippet includes necessary dependencies for implementing the `inherit_names` function. Write a Python function `def inherit_names( names: list[str], delegate: type, cache: bool = False, wrap: bool = False ) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]` to solve the following problem: Class decorator to pin attributes from an ExtensionArray to a Index subclass. Parameters ---------- names : List[str] delegate : class cache : bool, default False wrap : bool, default False Whether to wrap the inherited result in an Index. 
Here is the function: def inherit_names( names: list[str], delegate: type, cache: bool = False, wrap: bool = False ) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]: """ Class decorator to pin attributes from an ExtensionArray to a Index subclass. Parameters ---------- names : List[str] delegate : class cache : bool, default False wrap : bool, default False Whether to wrap the inherited result in an Index. """ def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]: for name in names: meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap) setattr(cls, name, meth) return cls return wrapper
Class decorator to pin attributes from an ExtensionArray to a Index subclass. Parameters ---------- names : List[str] delegate : class cache : bool, default False wrap : bool, default False Whether to wrap the inherited result in an Index.
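The decorator itself is thin: the work happens in ``_inherit_from_data``, which builds a property or a forwarding method per name and pins it on the class. A toy, self-contained sketch of the same delegation pattern, with hypothetical names (``inherit_from_delegate``, ``ToyArray``, ``ToyIndex``) and none of the caching or wrapping options of the real helper:

def inherit_from_delegate(names, delegate):
    # Copy the named attributes of ``delegate`` onto the decorated class,
    # forwarding every access to the wrapped ``self._data`` object.
    def _delegated(name):
        attr = getattr(delegate, name)
        if isinstance(attr, property):
            # re-expose the property, reading from the wrapped object
            return property(lambda self: getattr(self._data, name))

        # plain method: forward the call to the wrapped object
        def method(self, *args, **kwargs):
            return getattr(self._data, name)(*args, **kwargs)

        method.__name__ = name
        return method

    def wrapper(cls):
        for name in names:
            setattr(cls, name, _delegated(name))
        return cls

    return wrapper


class ToyArray:
    def __init__(self, values):
        self.values = list(values)

    @property
    def size(self):
        return len(self.values)

    def total(self):
        return sum(self.values)


@inherit_from_delegate(["size", "total"], ToyArray)
class ToyIndex:
    def __init__(self, values):
        self._data = ToyArray(values)


idx = ToyIndex([1, 2, 3])
print(idx.size)     # 3  (delegated property)
print(idx.total())  # 6  (delegated forwarding method)

The real ``_inherit_from_data`` additionally handles cython getset descriptors, optional caching via ``cache_readonly``, and re-wrapping array-like results in an Index, all of which this sketch omits.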
173,173
from __future__ import annotations from datetime import datetime import functools from itertools import zip_longest import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterable, Literal, NoReturn, Sequence, TypeVar, cast, final, overload, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, algos as libalgos, index as libindex, lib, ) from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import ( is_datetime_array, no_default, ) from pandas._libs.missing import is_float_nan from pandas._libs.tslibs import ( IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare, ) from pandas._typing import ( AnyAll, ArrayLike, Axes, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, JoinHow, Level, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( DuplicateLabelError, InvalidIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import ( find_stack_level, rewrite_exception, ) from pandas.core.dtypes.astype import ( astype_array, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import ( array_equivalent, is_valid_na_for_dtype, isna, ) from pandas.core import ( arraylike, ops, ) from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( setitem_datetimelike_compat, validate_putmask, ) from pandas.core.arrays import ( ArrowExtensionArray, BaseMaskedArray, Categorical, ExtensionArray, ) from pandas.core.arrays.string_ import StringArray from pandas.core.base import ( IndexOpsMixin, PandasObject, ) import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import disallow_ndim_indexing from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import ( PrettyDict, default_pprint, format_object_summary, pprint_thing, ) class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. 
The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] 
| tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. _no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. 
'0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. 
# See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. """ result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. 
""" result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. """ self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. 
test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. """ return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. 
new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. """ def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. """ name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). 
""" attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. """ header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. 
Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name are reused. >>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead."
) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. Raises ------ ValueError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``.
See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. >>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. 
For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. """ if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex. Parameters ---------- ascending : bool, default True False to sort in descending order. level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or " "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level. If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name.
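A doctest-style sketch (illustrative, via the public ``droplevel`` wrapper above) of the level-number semantics this helper implements: dropping all but one level collapses the result to a flat Index.

    >>> import pandas as pd
    >>> mi = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]], names=["x", "y"])
    >>> mi.droplevel(0)
    Index(['a', 'b'], dtype='object', name='y')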
""" if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." ) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. 
Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.is_unique False >>> idx = pd.Index([1, 5, 7]) >>> idx.is_unique True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique False >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique True """ return self._engine.is_unique def has_duplicates(self) -> bool: """ Check if the Index has duplicate values. Returns ------- bool Whether or not the Index has duplicate values. See Also -------- Index.is_unique : Inverse method that checks if it has unique values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.has_duplicates True >>> idx = pd.Index([1, 5, 7]) >>> idx.has_duplicates False >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates True >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates False """ return not self.is_unique def is_boolean(self) -> bool: """ Check if the Index only consists of booleans. .. deprecated:: 2.0.0 Use `pandas.api.types.is_bool_dtype` instead. Returns ------- bool Whether or not the Index only consists of booleans. See Also -------- is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([True, False, True]) >>> idx.is_boolean() # doctest: +SKIP True >>> idx = pd.Index(["True", "False", "True"]) >>> idx.is_boolean() # doctest: +SKIP False >>> idx = pd.Index([True, False, "True"]) >>> idx.is_boolean() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_boolean is deprecated. " "Use pandas.api.types.is_bool_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["boolean"] def is_integer(self) -> bool: """ Check if the Index only consists of integers. .. deprecated:: 2.0.0 Use `pandas.api.types.is_integer_dtype` instead. Returns ------- bool Whether or not the Index only consists of integers. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_integer() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_integer() # doctest: +SKIP False >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_integer() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_integer is deprecated. " "Use pandas.api.types.is_integer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer"] def is_floating(self) -> bool: """ Check if the Index is a floating type. ..
deprecated:: 2.0.0 Use `pandas.api.types.is_float_dtype` instead The Index may consist of only floats, NaNs, or a mix of floats, integers, or NaNs. Returns ------- bool Whether or not the Index only consists of floats, NaNs, or a mix of floats, integers, or NaNs. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4, np.nan]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_floating() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_floating is deprecated. " "Use pandas.api.types.is_float_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"] def is_numeric(self) -> bool: """ Check if the Index only consists of numeric data. .. deprecated:: 2.0.0 Use `pandas.api.types.is_numeric_dtype` instead. Returns ------- bool Whether or not the Index only consists of numeric data. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"]) >>> idx.is_numeric() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_numeric is deprecated. " "Use pandas.api.types.is_any_real_numeric_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer", "floating"] def is_object(self) -> bool: """ Check if the Index is of the object dtype. .. deprecated:: 2.0.0 Use `pandas.api.types.is_object_dtype` instead. Returns ------- bool Whether or not the Index is of the object dtype. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated).
Examples -------- >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Apple", "Mango", 2.0]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. 
" "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. """ if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. 
Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-like. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance; this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates.
Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate a pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set to False and all others to True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep to ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self.
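A doctest-style sketch (illustrative, public API only) of the name reconciliation this implements for set operations on equal indexes: the name survives only when both sides agree.

    >>> import pandas as pd
    >>> pd.Index([1, 2], name="x").union(pd.Index([1, 2], name="x")).name
    'x'
    >>> pd.Index([1, 2], name="x").union(pd.Index([1, 2], name="y")).name is None
    True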
""" name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. """ # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." 
) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. 
* None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
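A doctest-style sketch (illustrative) of the ordering this produces through the public ``intersection``: with the default ``sort=False`` the matched elements keep the order they have in the calling index.

    >>> import pandas as pd
    >>> pd.Index([3, 1, 2]).intersection(pd.Index([2, 3]))
    Index([3, 2], dtype='int64')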
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
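        For example (an illustrative sketch via the public API; the MultiIndex
        below is hypothetical), ``method='nearest'`` is rejected for a
        MultiIndex:

        >>> mi = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
        >>> mi.get_indexer([(1, 'a')], method='nearest')
        Traceback (most recent call last):
        NotImplementedError: method='nearest' not implemented yet for MultiIndex; see GitHub issue 9365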
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
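                # Illustrative (added note, hypothetical values): e.g. joining
                #   pd.date_range("2020-01-01", periods=2)
                # with
                #   pd.date_range("2020-01-01", periods=2, tz="UTC")
                # hits this branch and raises rather than coercing to object.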
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
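            # Illustrative (added note): a slice such as ``idx[1:3]`` keeps a
            # reference to the parent's values via ``refs=self._references``,
            # whereas the boolean/fancy-indexing paths below materialize a new
            # array and do not propagate refs.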
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- np.ndarray[np.intp] Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ # This works for either ndarray or EA, is overridden # by RangeIndex, MultIIndex return self._data.argsort(*args, **kwargs) def _check_indexing_error(self, key): if not is_scalar(key): # if key is not a scalar, directly raise an error (the code below # would convert to numpy arrays and raise later any way) - GH29926 raise InvalidIndexError(key) def _should_fallback_to_positional(self) -> bool: """ Should an integer key be treated as positional? """ return self.inferred_type not in { "integer", "mixed-integer", "floating", "complex", } _index_shared_docs[ "get_indexer_non_unique" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s Returns ------- indexer : np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. missing : np.ndarray[np.intp] An indexer into the target of the values not found. These correspond to the -1 in the indexer array. Examples -------- >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['b', 'b']) (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64)) In the example below there are no matched values. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['q', 'r', 't']) (array([-1, -1, -1]), array([0, 1, 2])) For this reason, the returned ``indexer`` contains only integers equal to -1. It demonstrates that there's no match between the index and the ``target`` values at these positions. The mask [0, 1, 2] in the return value shows that the first, second, and third elements are missing. Notice that the return value is a tuple contains two items. In the example below the first item is an array of locations in ``index``. The second item is a mask shows that the first and third elements are missing. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['f', 'b', 's']) (array([-1, 1, 3, 4, -1]), array([0, 2])) """ def get_indexer_non_unique( self, target ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) target = self._maybe_cast_listlike_indexer(target) if not self._should_compare(target) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. return self._get_indexer_non_comparable(target, method=None, unique=False) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer_non_unique(ptarget) if not is_dtype_equal(self.dtype, target.dtype): # TODO: if object, could use infer_dtype to preempt costly # conversion if still non-comparable? 
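            # Illustrative (added note, hypothetical values): for
            # self=Index([1, 2]) and target=Index([1.5]), both sides are cast
            # to the common dtype (float64) before retrying the lookup.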
dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) that = target.astype(dtype, copy=False) return this.get_indexer_non_unique(that) # TODO: get_indexer has fastpaths for both Categorical-self and # Categorical-target. Can we do something similar here? # Note: _maybe_promote ensures we never get here with MultiIndex # self and non-Multi target tgt_values = target._get_engine_target() if self._is_multi and target._is_multi: engine = self._engine # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has # no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr] indexer, missing = self._engine.get_indexer_non_unique(tgt_values) return ensure_platform_int(indexer), ensure_platform_int(missing) def get_indexer_for(self, target) -> npt.NDArray[np.intp]: """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. Returns ------- np.ndarray[np.intp] List of indices. Examples -------- >>> idx = pd.Index([np.nan, 'var1', np.nan]) >>> idx.get_indexer_for([np.nan]) array([0, 2]) """ if self._index_as_unique: return self.get_indexer(target) indexer, _ = self.get_indexer_non_unique(target) return indexer def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: """ Analogue to get_indexer that raises if any elements are missing. """ keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if self._index_as_unique: indexer = self.get_indexer_for(keyarr) keyarr = self.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr) self._raise_if_missing(keyarr, indexer, axis_name) keyarr = self.take(indexer) if isinstance(key, Index): # GH 42790 - Preserve name from an Index keyarr.name = key.name if keyarr.dtype.kind in ["m", "M"]: # DTI/TDI.take can infer a freq in some cases when we dont want one if isinstance(key, list) or ( isinstance(key, type(self)) # "Index" has no attribute "freq" and key.freq is None # type: ignore[attr-defined] ): keyarr = keyarr._with_freq(None) return keyarr, indexer def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: # TODO: remove special-case; this is just to keep exception # message tests from raising while debugging use_interval_msg = is_interval_dtype(self.dtype) or ( is_categorical_dtype(self.dtype) # "Index" has no attribute "categories" [attr-defined] and is_interval_dtype( self.categories.dtype # type: ignore[attr-defined] ) ) if nmissing == len(indexer): if use_interval_msg: key = list(key) raise KeyError(f"None of [{key}] are in the [{axis_name}]") not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f"{not_found} not in index") def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[True] = ... ) -> npt.NDArray[np.intp]: ... 
def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None. """ if method is not None: other = _unpack_nested_dtype(target) raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: # This is for get_indexer return no_matches else: # This is for get_indexer_non_unique missing = np.arange(len(target), dtype=np.intp) return no_matches, missing def _index_as_unique(self) -> bool: """ Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat. """ return self.is_unique _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" def _maybe_promote(self, other: Index) -> tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. """ if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if ( self.tz is not None and other.tz is not None and not tz_compare(self.tz, other.tz) ): # standardize on UTC return self.tz_convert("UTC"), other.tz_convert("UTC") elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): try: return type(other)(self), other except OutOfBoundsDatetime: return self, other elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): # TODO: we dont have tests that get here return type(other)(self), other elif self.dtype.kind == "u" and other.dtype.kind == "i": # GH#41873 if other.min() >= 0: # lookup min as it may be cached # TODO: may need itemsize check if we have non-64-bit Indexes return self, other.astype(self.dtype) elif self._is_multi and not other._is_multi: try: # "Type[Index]" has no attribute "from_tuples" other = type(self).from_tuples(other) # type: ignore[attr-defined] except (TypeError, ValueError): # let's instead try with a straight Index self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): # Reverse op so we dont need to re-implement on the subclasses other, self = other._maybe_promote(self) return self, other def _find_common_type_compat(self, target) -> DtypeObj: """ Implementation of find_common_type that adjusts for Index-specific special cases. 
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. 
step : int, default None Returns ------- slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples -------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3, None) """ start_slice, end_slice = self.slice_locs(start, end, step=step) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step) def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ return key def _maybe_cast_listlike_indexer(self, target) -> Index: """ Analogue to maybe_cast_indexer for get_indexer instead of get_loc. """ return ensure_index(target) def _validate_indexer(self, form: str_t, key, kind: str_t) -> None: """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ assert kind in ["getitem", "iloc"] if key is not None and not is_integer(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ # We are a plain index here (sub-class override this method if they # wish to have special treatment for floats/ints, e.g. datetimelike Indexes if is_numeric_dtype(self.dtype): return self._maybe_cast_indexer(label) # reject them, if index does not contain label if (is_float(label) or is_integer(label)) and label not in self: self._raise_invalid_indexer("slice", label) return label def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: # np.searchsorted expects ascending sort order, have to reverse # everything for it to work (element ordering, search side and # resulting value). pos = self[::-1].searchsorted( label, side="right" if side == "left" else "left" ) return len(self) - pos raise ValueError("index must be monotonic increasing or decreasing") def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} Returns ------- int Index of label. """ if side not in ("left", "right"): raise ValueError( "Invalid value for side kwarg, must be either " f"'left' or 'right': {side}" ) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. 
label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " f"label: {repr(original_label)}" ) if isinstance(slc, slice): if side == "left": return slc.start else: return slc.stop else: if side == "right": return slc + 1 else: return slc def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, defaults None If None, defaults to 1. Returns ------- tuple[int, int] See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples -------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = step is None or step >= 0 if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice def delete(self: _IndexT, loc) -> _IndexT: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... F = TypeVar("F", bound=FuncType) JoinHow = Literal["left", "right", "inner", "outer"] ensure_platform_int = algos.ensure_platform_int The provided code snippet includes necessary dependencies for implementing the `_maybe_return_indexers` function. Write a Python function `def _maybe_return_indexers(meth: F) -> F` to solve the following problem: Decorator to simplify 'return_indexers' checks in Index.join. Here is the function: def _maybe_return_indexers(meth: F) -> F: """ Decorator to simplify 'return_indexers' checks in Index.join. """ @functools.wraps(meth) def join( self, other: Index, *, how: JoinHow = "left", level=None, return_indexers: bool = False, sort: bool = False, ): join_index, lidx, ridx = meth(self, other, how=how, level=level, sort=sort) if not return_indexers: return join_index if lidx is not None: lidx = ensure_platform_int(lidx) if ridx is not None: ridx = ensure_platform_int(ridx) return join_index, lidx, ridx return cast(F, join)
Decorator to simplify 'return_indexers' checks in Index.join.
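To make the row above easier to skim, here is a minimal, self-contained sketch of how a decorator with this shape is applied. It is illustrative only: `DemoIndex`, its toy `join`, and the `maybe_return_indexers` name are hypothetical stand-ins, not the pandas internals quoted above, and `np.asarray(..., dtype=np.intp)` stands in for `ensure_platform_int`.

import functools
import numpy as np

def maybe_return_indexers(meth):
    # Same idea as the decorator above: run the wrapped join, then decide
    # whether to hand back the indexer arrays based on `return_indexers`.
    @functools.wraps(meth)
    def join(self, other, *, how="left", level=None, return_indexers=False, sort=False):
        join_index, lidx, ridx = meth(self, other, how=how, level=level, sort=sort)
        if not return_indexers:
            return join_index
        if lidx is not None:
            lidx = np.asarray(lidx, dtype=np.intp)  # stand-in for ensure_platform_int
        if ridx is not None:
            ridx = np.asarray(ridx, dtype=np.intp)
        return join_index, lidx, ridx
    return join

class DemoIndex:
    # Toy container, only here to exercise the decorator.
    def __init__(self, values):
        self.values = list(values)

    @maybe_return_indexers
    def join(self, other, *, how="left", level=None, sort=False):
        # Toy "join": keep self's labels and record matching positions in `other`.
        lidx = np.arange(len(self.values), dtype=np.intp)
        ridx = np.array(
            [other.values.index(v) if v in other.values else -1 for v in self.values],
            dtype=np.intp,
        )
        return DemoIndex(self.values), lidx, ridx

left, right = DemoIndex(["a", "b", "c"]), DemoIndex(["b", "c", "d"])
print(left.join(right))                             # just the joined "index" object
print(left.join(right, return_indexers=True)[1:])   # (array([0, 1, 2]), array([-1, 0, 1]))

The point of the decorator is that each concrete join implementation only returns the (index, lidx, ridx) triple; the wrapper centralizes the `return_indexers` handling and the integer-dtype coercion in one place.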
173,174
from __future__ import annotations from datetime import datetime import functools from itertools import zip_longest import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterable, Literal, NoReturn, Sequence, TypeVar, cast, final, overload, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, algos as libalgos, index as libindex, lib, ) from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import ( is_datetime_array, no_default, ) from pandas._libs.missing import is_float_nan from pandas._libs.tslibs import ( IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare, ) from pandas._typing import ( AnyAll, ArrayLike, Axes, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, JoinHow, Level, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( DuplicateLabelError, InvalidIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import ( find_stack_level, rewrite_exception, ) from pandas.core.dtypes.astype import ( astype_array, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import ( array_equivalent, is_valid_na_for_dtype, isna, ) from pandas.core import ( arraylike, ops, ) from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( setitem_datetimelike_compat, validate_putmask, ) from pandas.core.arrays import ( ArrowExtensionArray, BaseMaskedArray, Categorical, ExtensionArray, ) from pandas.core.arrays.string_ import StringArray from pandas.core.base import ( IndexOpsMixin, PandasObject, ) import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import disallow_ndim_indexing from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import ( PrettyDict, default_pprint, format_object_summary, pprint_thing, ) ABCMultiIndex = cast( "Type[MultiIndex]", create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)), ) 
ABCPeriodIndex = cast( "Type[PeriodIndex]", create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)), ) def _new_PeriodIndex(cls, **d): # GH13277 for unpickling values = d.pop("data") if values.dtype == "int64": freq = d.pop("freq", None) values = PeriodArray(values, freq=freq) return cls._simple_new(values, **d) else: return cls(values, **d) ["strftime", "start_time", "end_time"] + PeriodArray._field_ops, PeriodArray, wrap=True, ) The provided code snippet includes necessary dependencies for implementing the `_new_Index` function. Write a Python function `def _new_Index(cls, d)` to solve the following problem: This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__. Here is the function: def _new_Index(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__. """ # required for backward compat, because PI can't be instantiated with # ordinals through __new__ GH #13277 if issubclass(cls, ABCPeriodIndex): from pandas.core.indexes.period import _new_PeriodIndex return _new_PeriodIndex(cls, **d) if issubclass(cls, ABCMultiIndex): if "labels" in d and "codes" not in d: # GH#23752 "labels" kwarg has been replaced with "codes" d["codes"] = d.pop("labels") # Since this was a valid MultiIndex at pickle-time, we don't need to # check validty at un-pickle time. d["verify_integrity"] = False elif "dtype" not in d and "data" in d: # Prevent Index.__new__ from conducting inference; # "data" key not in RangeIndex d["dtype"] = d["data"].dtype return cls.__new__(cls, **d)
This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__.
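To make the unpickling hook described above concrete, the following is a small, hypothetical round-trip under the same protocol: a module-level reconstructor receives the class plus a dict of saved state and forwards it as keyword arguments, instead of pickle calling a no-argument `__new__`. `ToyIndex` and `_new_toy_index` are illustrative names, not pandas code.

import pickle

def _new_toy_index(cls, d):
    # Reconstructor referenced from __reduce__; it must live at module level
    # so pickle can locate it by name when loading.
    return cls(**d)

class ToyIndex:
    def __init__(self, data, name=None):
        self.data = list(data)
        self.name = name

    def __reduce__(self):
        # State travels as a dict so the reconstructor can pass it as kwargs.
        state = {"data": self.data, "name": self.name}
        return _new_toy_index, (type(self), state)

idx = ToyIndex([1, 2, 3], name="x")
roundtripped = pickle.loads(pickle.dumps(idx))
print(roundtripped.data, roundtripped.name)  # [1, 2, 3] x

The same pattern lets subclasses with constructor-time validation (as in the quoted `_new_Index`) adjust or suppress that validation at unpickle time, since all saved state arrives through one dictionary.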
173,175
from __future__ import annotations from datetime import datetime import functools from itertools import zip_longest import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterable, Literal, NoReturn, Sequence, TypeVar, cast, final, overload, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, algos as libalgos, index as libindex, lib, ) from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import ( is_datetime_array, no_default, ) from pandas._libs.missing import is_float_nan from pandas._libs.tslibs import ( IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare, ) from pandas._typing import ( AnyAll, ArrayLike, Axes, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, JoinHow, Level, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( DuplicateLabelError, InvalidIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import ( find_stack_level, rewrite_exception, ) from pandas.core.dtypes.astype import ( astype_array, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import ( array_equivalent, is_valid_na_for_dtype, isna, ) from pandas.core import ( arraylike, ops, ) from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( setitem_datetimelike_compat, validate_putmask, ) from pandas.core.arrays import ( ArrowExtensionArray, BaseMaskedArray, Categorical, ExtensionArray, ) from pandas.core.arrays.string_ import StringArray from pandas.core.base import ( IndexOpsMixin, PandasObject, ) import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import disallow_ndim_indexing from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import ( PrettyDict, default_pprint, format_object_summary, pprint_thing, ) class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. 
The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] 
| tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. _no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. 
'0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. 
# See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. """ result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. 
""" result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. """ self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. 
test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. """ return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. 
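# Added, hedged illustration of this ExtensionDtype branch (comment only, not
# executed here): casting a plain integer Index to a masked dtype routes
# through the dtype's array type, e.g. ``pd.Index([1, 2, 3]).astype("Int64")``
# builds an IntegerArray via the ``_from_sequence`` call below and yields
# ``Index([1, 2, 3], dtype='Int64')``.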
new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. """ def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. """ name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). 
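Illustrative sketch (added; the exact quoting is an implementation detail of
``default_pprint``): a small named index typically yields

>>> import pandas as pd
>>> pd.Index([1, 2, 3], name="x")._format_attrs()
[('dtype', "'int64'"), ('name', "'x'")]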
""" attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. """ header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. 
Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name is reused. >>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead." 
) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. 
See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. >>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. 
For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. """ if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or" "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name. 
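Illustrative sketch (added): dropping all but one level collapses the result
to a plain Index, keeping the surviving level's name:

>>> import pandas as pd
>>> mi = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]], names=["x", "y"])
>>> mi._drop_level_numbers([0])
Index(['a', 'b'], dtype='object', name='y')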
""" if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." ) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. 
Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.is_unique False >>> idx = pd.Index([1, 5, 7]) >>> idx.is_unique True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique False >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique True """ return self._engine.is_unique def has_duplicates(self) -> bool: """ Check if the Index has duplicate values. Returns ------- bool Whether or not the Index has duplicate values. See Also -------- Index.is_unique : Inverse method that checks if it has unique values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.has_duplicates True >>> idx = pd.Index([1, 5, 7]) >>> idx.has_duplicates False >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates True >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates False """ return not self.is_unique def is_boolean(self) -> bool: """ Check if the Index only consists of booleans. .. deprecated:: 2.0.0 Use `pandas.api.types.is_bool_dtype` instead. Returns ------- bool Whether or not the Index only consists of booleans. See Also -------- is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([True, False, True]) >>> idx.is_boolean() # doctest: +SKIP True >>> idx = pd.Index(["True", "False", "True"]) >>> idx.is_boolean() # doctest: +SKIP False >>> idx = pd.Index([True, False, "True"]) >>> idx.is_boolean() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_boolean is deprecated. " "Use pandas.api.types.is_bool_type instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["boolean"] def is_integer(self) -> bool: """ Check if the Index only consists of integers. .. deprecated:: 2.0.0 Use `pandas.api.types.is_integer_dtype` instead. Returns ------- bool Whether or not the Index only consists of integers. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_integer() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_integer() # doctest: +SKIP False >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_integer() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_integer is deprecated. " "Use pandas.api.types.is_integer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer"] def is_floating(self) -> bool: """ Check if the Index is a floating type. .. 
deprecated:: 2.0.0 Use `pandas.api.types.is_float_dtype` instead The Index may consist of only floats, NaNs, or a mix of floats, integers, or NaNs. Returns ------- bool Whether or not the Index only consists of only consists of floats, NaNs, or a mix of floats, integers, or NaNs. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4, np.nan]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_floating() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_floating is deprecated. " "Use pandas.api.types.is_float_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"] def is_numeric(self) -> bool: """ Check if the Index only consists of numeric data. .. deprecated:: 2.0.0 Use `pandas.api.types.is_numeric_dtype` instead. Returns ------- bool Whether or not the Index only consists of numeric data. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"]) >>> idx.is_numeric() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_numeric is deprecated. " "Use pandas.api.types.is_any_real_numeric_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer", "floating"] def is_object(self) -> bool: """ Check if the Index is of the object dtype. .. deprecated:: 2.0.0 Use `pandas.api.types.is_object_dtype` instead. Returns ------- bool Whether or not the Index is of the object dtype. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). 
Examples -------- >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Apple", "Mango", 2.0]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. 
" "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. """ if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. 
Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-likes. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance, this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. 
Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate an pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. 
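Illustrative sketch (added): the visible effect through a public set
operation is that mismatched names are dropped while matching names are
kept:

>>> import pandas as pd
>>> pd.Index([1, 2], name="x").union(pd.Index([1, 2], name="y")).name is None
True
>>> pd.Index([1, 2], name="x").union(pd.Index([1, 2], name="x")).name
'x'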
""" name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. """ # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." 
) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. 
* None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
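
        Notes
        -----
        Worked example of the approach below (values chosen for illustration,
        they are not part of the original source): with
        ``self = Index([3, 1, 2, 2])`` and ``other = Index([2, 3, 5])``, the
        unique left values are ``[3, 1, 2]`` and ``get_indexer_for`` maps the
        unique right values to positions ``[2, 0, -1]``.  Dropping the ``-1``
        leaves ``taker = [2, 0]``; with ``sort=False`` this is sorted to
        ``[0, 2]`` so the result ``[3, 2]`` keeps the order of ``self``,
        otherwise ``[2, 3]`` is returned and sorting is left to the caller.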
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
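
        Summary of the checks below: unrecognised methods raise ``ValueError``;
        ``method="nearest"`` (or ``tolerance`` with pad/backfill) on a
        MultiIndex, and any ``method`` on an IntervalIndex or CategoricalIndex,
        raise ``NotImplementedError``; ``limit`` or ``tolerance`` passed
        without a fill method raise ``ValueError``.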
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
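                # For example (illustrative, not from the original source):
                # pd.date_range("2020-01-01", periods=3).join(
                #     pd.date_range("2020-01-01", periods=3, tz="UTC"))
                # ends up here because exactly one side is tz-aware.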
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
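                    # For example (illustrative values): with
                    # mask = [True, False, True, True] and
                    # left_indexer = [2, 0, 1] (positions within the masked
                    # frame), mask.nonzero()[0] = [0, 2, 3], so the remapped
                    # left_indexer = [3, 0, 2] points at rows of the original,
                    # unmasked frame.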
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
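
        Notes
        -----
        ``where`` forwards to :meth:`Index.putmask`: the condition is converted
        to a boolean ndarray and the call becomes ``self.putmask(~cond, other)``,
        so replacement values that do not fit the index dtype may force a cast
        to a common dtype.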
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
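            # _simple_new skips the validation and inference done by the public
            # constructor: a slice of an existing Index already has the right
            # dtype, and passing self._references keeps the result tied to the
            # parent's Copy-on-Write reference tracking.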
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 

        Parameters
        ----------
        *args
            Passed to `numpy.ndarray.argsort`.
        **kwargs
            Passed to `numpy.ndarray.argsort`.

        Returns
        -------
        np.ndarray[np.intp]
            Integer indices that would sort the index if used as
            an indexer.

        See Also
        --------
        numpy.argsort : Similar method for NumPy arrays.
        Index.sort_values : Return sorted copy of Index.

        Examples
        --------
        >>> idx = pd.Index(['b', 'a', 'd', 'c'])
        >>> idx
        Index(['b', 'a', 'd', 'c'], dtype='object')

        >>> order = idx.argsort()
        >>> order
        array([1, 0, 3, 2])

        >>> idx[order]
        Index(['a', 'b', 'c', 'd'], dtype='object')
        """
        # This works for either ndarray or EA, is overridden
        #  by RangeIndex, MultiIndex
        return self._data.argsort(*args, **kwargs)

    def _check_indexing_error(self, key):
        if not is_scalar(key):
            # if key is not a scalar, directly raise an error (the code below
            # would convert to numpy arrays and raise later any way) - GH29926
            raise InvalidIndexError(key)

    def _should_fallback_to_positional(self) -> bool:
        """
        Should an integer key be treated as positional?
        """
        return self.inferred_type not in {
            "integer",
            "mixed-integer",
            "floating",
            "complex",
        }

    _index_shared_docs[
        "get_indexer_non_unique"
    ] = """
        Compute indexer and mask for new index given the current index.

        The indexer should then be used as an input to ndarray.take to align
        the current data to the new index.

        Parameters
        ----------
        target : %(target_klass)s

        Returns
        -------
        indexer : np.ndarray[np.intp]
            Integers from 0 to n - 1 indicating that the index at these
            positions matches the corresponding target values. Missing values
            in the target are marked by -1.
        missing : np.ndarray[np.intp]
            An indexer into the target of the values not found.
            These correspond to the -1 in the indexer array.

        Examples
        --------
        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['b', 'b'])
        (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))

        In the example below there are no matched values.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['q', 'r', 't'])
        (array([-1, -1, -1]), array([0, 1, 2]))

        For this reason, the returned ``indexer`` contains only integers equal
        to -1. It demonstrates that there's no match between the index and the
        ``target`` values at these positions. The mask [0, 1, 2] in the return
        value shows that the first, second, and third elements are missing.

        Notice that the return value is a tuple containing two items. In the
        example below the first item is an array of locations in ``index``.
        The second item is a mask that shows that the first and third elements
        are missing.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['f', 'b', 's'])
        (array([-1, 1, 3, 4, -1]), array([0, 2]))
        """

    def get_indexer_non_unique(
        self, target
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        target = ensure_index(target)
        target = self._maybe_cast_listlike_indexer(target)

        if not self._should_compare(target) and not self._should_partial_index(target):
            # _should_partial_index e.g. IntervalIndex with numeric scalars
            #  that can be matched to Interval scalars.
            return self._get_indexer_non_comparable(target, method=None, unique=False)

        pself, ptarget = self._maybe_promote(target)
        if pself is not self or ptarget is not target:
            return pself.get_indexer_non_unique(ptarget)

        if not is_dtype_equal(self.dtype, target.dtype):
            # TODO: if object, could use infer_dtype to preempt costly
            #  conversion if still non-comparable?
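            # Illustrative sketch (not part of the upstream source): a
            # mixed-dtype lookup such as
            #     pd.Index([1.0, 2.0, 2.0]).get_indexer_non_unique([2])
            # casts both sides to the common dtype (float64 here) and then
            # recurses, returning (array([1, 2]), array([], dtype=int64)).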
            dtype = self._find_common_type_compat(target)

            this = self.astype(dtype, copy=False)
            that = target.astype(dtype, copy=False)
            return this.get_indexer_non_unique(that)

        # TODO: get_indexer has fastpaths for both Categorical-self and
        #  Categorical-target. Can we do something similar here?

        # Note: _maybe_promote ensures we never get here with MultiIndex
        #  self and non-Multi target
        tgt_values = target._get_engine_target()
        if self._is_multi and target._is_multi:
            engine = self._engine
            # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has
            # no attribute "_extract_level_codes"
            tgt_values = engine._extract_level_codes(target)  # type: ignore[union-attr]

        indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
        return ensure_platform_int(indexer), ensure_platform_int(missing)

    def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
        """
        Guaranteed return of an indexer even when non-unique.

        This dispatches to get_indexer or get_indexer_non_unique
        as appropriate.

        Returns
        -------
        np.ndarray[np.intp]
            List of indices.

        Examples
        --------
        >>> idx = pd.Index([np.nan, 'var1', np.nan])
        >>> idx.get_indexer_for([np.nan])
        array([0, 2])
        """
        if self._index_as_unique:
            return self.get_indexer(target)
        indexer, _ = self.get_indexer_non_unique(target)
        return indexer

    def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:
        """
        Analogue to get_indexer that raises if any elements are missing.
        """
        keyarr = key
        if not isinstance(keyarr, Index):
            keyarr = com.asarray_tuplesafe(keyarr)

        if self._index_as_unique:
            indexer = self.get_indexer_for(keyarr)
            keyarr = self.reindex(keyarr)[0]
        else:
            keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)

        self._raise_if_missing(keyarr, indexer, axis_name)

        keyarr = self.take(indexer)
        if isinstance(key, Index):
            # GH 42790 - Preserve name from an Index
            keyarr.name = key.name
        if keyarr.dtype.kind in ["m", "M"]:
            # DTI/TDI.take can infer a freq in some cases when we don't want one
            if isinstance(key, list) or (
                isinstance(key, type(self))
                # "Index" has no attribute "freq"
                and key.freq is None  # type: ignore[attr-defined]
            ):
                keyarr = keyarr._with_freq(None)

        return keyarr, indexer

    def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
        """
        Check that indexer can be used to return a result.

        e.g. at least one element was found,
        unless the list of keys was actually empty.

        Parameters
        ----------
        key : list-like
            Targeted labels (only used to show correct error message).
        indexer : array-like of ints
            Indices corresponding to the key,
            (with -1 indicating not found).
        axis_name : str

        Raises
        ------
        KeyError
            If at least one key was requested but none was found.
        """
        if len(key) == 0:
            return

        # Count missing values
        missing_mask = indexer < 0
        nmissing = missing_mask.sum()

        if nmissing:
            # TODO: remove special-case; this is just to keep exception
            #  message tests from raising while debugging
            use_interval_msg = is_interval_dtype(self.dtype) or (
                is_categorical_dtype(self.dtype)
                # "Index" has no attribute "categories"  [attr-defined]
                and is_interval_dtype(
                    self.categories.dtype  # type: ignore[attr-defined]
                )
            )

            if nmissing == len(indexer):
                if use_interval_msg:
                    key = list(key)
                raise KeyError(f"None of [{key}] are in the [{axis_name}]")

            not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
            raise KeyError(f"{not_found} not in index")

    def _get_indexer_non_comparable(
        self, target: Index, method, unique: Literal[True] = ...
    ) -> npt.NDArray[np.intp]:
        ...
def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None. """ if method is not None: other = _unpack_nested_dtype(target) raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: # This is for get_indexer return no_matches else: # This is for get_indexer_non_unique missing = np.arange(len(target), dtype=np.intp) return no_matches, missing def _index_as_unique(self) -> bool: """ Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat. """ return self.is_unique _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" def _maybe_promote(self, other: Index) -> tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. """ if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if ( self.tz is not None and other.tz is not None and not tz_compare(self.tz, other.tz) ): # standardize on UTC return self.tz_convert("UTC"), other.tz_convert("UTC") elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): try: return type(other)(self), other except OutOfBoundsDatetime: return self, other elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): # TODO: we dont have tests that get here return type(other)(self), other elif self.dtype.kind == "u" and other.dtype.kind == "i": # GH#41873 if other.min() >= 0: # lookup min as it may be cached # TODO: may need itemsize check if we have non-64-bit Indexes return self, other.astype(self.dtype) elif self._is_multi and not other._is_multi: try: # "Type[Index]" has no attribute "from_tuples" other = type(self).from_tuples(other) # type: ignore[attr-defined] except (TypeError, ValueError): # let's instead try with a straight Index self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): # Reverse op so we dont need to re-implement on the subclasses other, self = other._maybe_promote(self) return self, other def _find_common_type_compat(self, target) -> DtypeObj: """ Implementation of find_common_type that adjusts for Index-specific special cases. 
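
        For example (an illustrative call into this private helper, not a
        public API guarantee): a uint64 index combined with a signed-integer
        target falls back to object dtype rather than risk a lossy cast.

        >>> pd.Index([1, 2, 3], dtype="uint64")._find_common_type_compat([-1])
        dtype('O')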
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. 
        step : int, default None

        Returns
        -------
        slice

        Raises
        ------
        KeyError : If key does not exist, or key is not unique and index is
            not ordered.

        Notes
        -----
        This function assumes that the data is sorted, so use at your own peril.

        Examples
        --------
        This is a method on all index types. For example you can do:

        >>> idx = pd.Index(list('abcd'))
        >>> idx.slice_indexer(start='b', end='c')
        slice(1, 3, None)

        >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
        >>> idx.slice_indexer(start='b', end=('c', 'g'))
        slice(1, 3, None)
        """
        start_slice, end_slice = self.slice_locs(start, end, step=step)

        # return a slice
        if not is_scalar(start_slice):
            raise AssertionError("Start slice bound is non-scalar")
        if not is_scalar(end_slice):
            raise AssertionError("End slice bound is non-scalar")

        return slice(start_slice, end_slice, step)

    def _maybe_cast_indexer(self, key):
        """
        If we have a float key and are not a floating index, then try to cast
        to an int if equivalent.
        """
        return key

    def _maybe_cast_listlike_indexer(self, target) -> Index:
        """
        Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
        """
        return ensure_index(target)

    def _validate_indexer(self, form: str_t, key, kind: str_t) -> None:
        """
        If we are a positional indexer, validate that we have an appropriately
        typed bound (it must be an integer).
        """
        assert kind in ["getitem", "iloc"]

        if key is not None and not is_integer(key):
            self._raise_invalid_indexer(form, key)

    def _maybe_cast_slice_bound(self, label, side: str_t):
        """
        This function should be overloaded in subclasses that allow non-trivial
        casting on label-slice bounds, e.g. datetime-like indices allowing
        strings containing formatted datetimes.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """

        # We are a plain index here (subclasses override this method if they
        #  wish to have special treatment for floats/ints, e.g. datetimelike Indexes)
        if is_numeric_dtype(self.dtype):
            return self._maybe_cast_indexer(label)

        # reject them, if index does not contain label
        if (is_float(label) or is_integer(label)) and label not in self:
            self._raise_invalid_indexer("slice", label)

        return label

    def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
        if self.is_monotonic_increasing:
            return self.searchsorted(label, side=side)
        elif self.is_monotonic_decreasing:
            # np.searchsorted expects ascending sort order, have to reverse
            # everything for it to work (element ordering, search side and
            # resulting value).
            pos = self[::-1].searchsorted(
                label, side="right" if side == "left" else "left"
            )
            return len(self) - pos

        raise ValueError("index must be monotonic increasing or decreasing")

    def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
        """
        Calculate slice bound that corresponds to given label.

        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
        of given label.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        int
            Index of label.
        """

        if side not in ("left", "right"):
            raise ValueError(
                "Invalid value for side kwarg, must be either "
                f"'left' or 'right': {side}"
            )

        original_label = label

        # For datetime indices label may be a string that has to be converted
        # to datetime boundary according to its resolution.
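        # Illustrative sketch: on a DatetimeIndex a partial string is resolved
        # to the boundary Timestamp before lookup, e.g.
        #     pd.date_range("2014-01-01", periods=3).get_slice_bound("2014", "left")
        # returns 0.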
        label = self._maybe_cast_slice_bound(label, side)

        # we need to look up the label
        try:
            slc = self.get_loc(label)
        except KeyError as err:
            try:
                return self._searchsorted_monotonic(label, side)
            except ValueError:
                # raise the original KeyError
                raise err

        if isinstance(slc, np.ndarray):
            # get_loc may return a boolean array, which
            # is OK as long as they are representable by a slice.
            assert is_bool_dtype(slc.dtype)
            slc = lib.maybe_booleans_to_slice(slc.view("u1"))
            if isinstance(slc, np.ndarray):
                raise KeyError(
                    f"Cannot get {side} slice bound for non-unique "
                    f"label: {repr(original_label)}"
                )

        if isinstance(slc, slice):
            if side == "left":
                return slc.start
            else:
                return slc.stop
        else:
            if side == "right":
                return slc + 1
            else:
                return slc

    def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]:
        """
        Compute slice locations for input labels.

        Parameters
        ----------
        start : label, default None
            If None, defaults to the beginning.
        end : label, default None
            If None, defaults to the end.
        step : int, default None
            If None, defaults to 1.

        Returns
        -------
        tuple[int, int]

        See Also
        --------
        Index.get_loc : Get location for a single label.

        Notes
        -----
        This method only works if the index is monotonic or unique.

        Examples
        --------
        >>> idx = pd.Index(list('abcd'))
        >>> idx.slice_locs(start='b', end='c')
        (1, 3)
        """
        inc = step is None or step >= 0

        if not inc:
            # If it's a reverse slice, temporarily swap bounds.
            start, end = end, start

        # GH 16785: If start and end happen to be date strings with UTC offsets
        # attempt to parse and check that the offsets are the same
        if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):
            try:
                ts_start = Timestamp(start)
                ts_end = Timestamp(end)
            except (ValueError, TypeError):
                pass
            else:
                if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
                    raise ValueError("Both dates must have the same UTC offset")

        start_slice = None
        if start is not None:
            start_slice = self.get_slice_bound(start, "left")
        if start_slice is None:
            start_slice = 0

        end_slice = None
        if end is not None:
            end_slice = self.get_slice_bound(end, "right")
        if end_slice is None:
            end_slice = len(self)

        if not inc:
            # Bounds at this moment are swapped, swap them back and shift by 1.
            #
            # slice_locs('B', 'A', step=-1): s='B', e='A'
            #
            #              s='A'                 e='B'
            # AFTER SWAP:    |                     |
            #                v ------------------> V
            #           -----------------------------------
            #           | | |A|A|A|A| | | | | |B|B| | | | |
            #           -----------------------------------
            #              ^ <------------------ ^
            # SHOULD BE:   |                     |
            #           end=s-1              start=e-1
            #
            end_slice, start_slice = start_slice - 1, end_slice - 1

            # i == -1 triggers ``len(self) + i`` selection that points to the
            # last element, not before-the-first one, subtracting len(self)
            # compensates that.
            if end_slice == -1:
                end_slice -= len(self)
            if start_slice == -1:
                start_slice -= len(self)

        return start_slice, end_slice

    def delete(self: _IndexT, loc) -> _IndexT:
        """
        Make new Index with passed location(-s) deleted.

        Parameters
        ----------
        loc : int or list of int
            Location of item(-s) which will be deleted.
            Use a list of locations to delete more than one value at the same time.

        Returns
        -------
        Index
            Will be same type as self, except for RangeIndex.

        See Also
        --------
        numpy.delete : Delete any rows and columns from a NumPy array (ndarray).
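
        Notes
        -----
        ``loc`` follows ``numpy.delete`` semantics, so a negative location
        counts from the end (illustrative):

        >>> pd.Index(['a', 'b', 'c']).delete(-1)
        Index(['a', 'b'], dtype='object')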
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
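
        For example (illustrative), ``pd.CategoricalIndex(["a", "b"]).any()``
        raises ``TypeError`` via ``make_invalid_op``.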
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) class MultiIndex(Index): """ A multi-level, or hierarchical, index object for pandas objects. Parameters ---------- levels : sequence of arrays The unique labels for each level. codes : sequence of arrays Integers for each level designating which label at each location. sortorder : optional int Level of sortedness (must be lexicographically sorted by that level). names : optional sequence of objects Names for each of the index levels. (name is accepted for compat). copy : bool, default False Copy the meta-data. verify_integrity : bool, default True Check that the levels/codes are consistent and valid. Attributes ---------- names levels codes nlevels levshape dtypes Methods ------- from_arrays from_tuples from_product from_frame set_levels set_codes to_frame to_flat_index sortlevel droplevel swaplevel reorder_levels remove_unused_levels get_level_values get_indexer get_loc get_locs get_loc_level drop See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Create a MultiIndex from the cartesian product of iterables. MultiIndex.from_tuples : Convert list of tuples to a MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Index : The base pandas Index type. 
Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__ for more. Examples -------- A new ``MultiIndex`` is typically constructed using one of the helper methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product` and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``): >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex([(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')], names=['number', 'color']) See further examples for how to construct a MultiIndex in the doc strings of the mentioned helper methods. """ _hidden_attrs = Index._hidden_attrs | frozenset() # initialize to zero-length tuples to make everything work _typ = "multiindex" _names: list[Hashable | None] = [] _levels = FrozenList() _codes = FrozenList() _comparables = ["names"] sortorder: int | None # -------------------------------------------------------------------- # Constructors def __new__( cls, levels=None, codes=None, sortorder=None, names=None, dtype=None, copy: bool = False, name=None, verify_integrity: bool = True, ) -> MultiIndex: # compat with Index if name is not None: names = name if levels is None or codes is None: raise TypeError("Must pass both levels and codes") if len(levels) != len(codes): raise ValueError("Length of levels and codes must be the same.") if len(levels) == 0: raise ValueError("Must pass non-zero number of levels/codes") result = object.__new__(cls) result._cache = {} # we've already validated levels and codes, so shortcut here result._set_levels(levels, copy=copy, validate=False) result._set_codes(codes, copy=copy, validate=False) result._names = [None] * len(levels) if names is not None: # handles name validation result._set_names(names) if sortorder is not None: result.sortorder = int(sortorder) else: result.sortorder = sortorder if verify_integrity: new_codes = result._verify_integrity() result._codes = new_codes result._reset_identity() result._references = None return result def _validate_codes(self, level: list, code: list): """ Reassign code values as -1 if their corresponding levels are NaN. Parameters ---------- code : list Code to reassign. level : list Level to check for missing values (NaN, NaT, None). Returns ------- new code where code value = -1 if it corresponds to a level with missing values (NaN, NaT, None). """ null_mask = isna(level) if np.any(null_mask): # error: Incompatible types in assignment # (expression has type "ndarray[Any, dtype[Any]]", # variable has type "List[Any]") code = np.where(null_mask[code], -1, code) # type: ignore[assignment] return code def _verify_integrity(self, codes: list | None = None, levels: list | None = None): """ Parameters ---------- codes : optional list Codes to check for validity. Defaults to current codes. levels : optional list Levels to check for validity. Defaults to current levels. Raises ------ ValueError If length of levels and codes don't match, if the codes for any level would exceed level bounds, or there are any duplicate levels. Returns ------- new codes where code value = -1 if it corresponds to a NaN level. """ # NOTE: Currently does not check, among other things, that cached # nlevels matches nor that sortorder matches actually sortorder. codes = codes or self.codes levels = levels or self.levels if len(levels) != len(codes): raise ValueError( "Length of levels and codes must match. NOTE: " "this index is in an inconsistent state." 
) codes_length = len(codes[0]) for i, (level, level_codes) in enumerate(zip(levels, codes)): if len(level_codes) != codes_length: raise ValueError( f"Unequal code lengths: {[len(code_) for code_ in codes]}" ) if len(level_codes) and level_codes.max() >= len(level): raise ValueError( f"On level {i}, code max ({level_codes.max()}) >= length of " f"level ({len(level)}). NOTE: this index is in an " "inconsistent state" ) if len(level_codes) and level_codes.min() < -1: raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1") if not level.is_unique: raise ValueError( f"Level values must be unique: {list(level)} on level {i}" ) if self.sortorder is not None: if self.sortorder > _lexsort_depth(self.codes, self.nlevels): raise ValueError( "Value for sortorder must be inferior or equal to actual " f"lexsort_depth: sortorder {self.sortorder} " f"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}" ) codes = [ self._validate_codes(level, code) for level, code in zip(levels, codes) ] new_codes = FrozenList(codes) return new_codes def from_arrays( cls, arrays, sortorder=None, names: Sequence[Hashable] | Hashable | lib.NoDefault = lib.no_default, ) -> MultiIndex: """ Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex([(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')], names=['number', 'color']) """ error_msg = "Input must be a list / sequence of array-likes." if not is_list_like(arrays): raise TypeError(error_msg) if is_iterator(arrays): arrays = list(arrays) # Check if elements of array are list-like for array in arrays: if not is_list_like(array): raise TypeError(error_msg) # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError("all arrays must be same length") codes, levels = factorize_from_iterables(arrays) if names is lib.no_default: names = [getattr(arr, "name", None) for arr in arrays] return cls( levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False, ) def from_tuples( cls, tuples: Iterable[tuple[Hashable, ...]], sortorder: int | None = None, names: Sequence[Hashable] | Hashable = None, ) -> MultiIndex: """ Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... 
(2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex([(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')], names=['number', 'color']) """ if not is_list_like(tuples): raise TypeError("Input must be a list / sequence of tuple-likes.") if is_iterator(tuples): tuples = list(tuples) tuples = cast(Collection[Tuple[Hashable, ...]], tuples) # handling the empty tuple cases if len(tuples) and all(isinstance(e, tuple) and not e for e in tuples): codes = [np.zeros(len(tuples))] levels = [Index(com.asarray_tuplesafe(tuples, dtype=np.dtype("object")))] return cls( levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False, ) arrays: list[Sequence[Hashable]] if len(tuples) == 0: if names is None: raise TypeError("Cannot infer number of levels from empty list") # error: Argument 1 to "len" has incompatible type "Hashable"; # expected "Sized" arrays = [[]] * len(names) # type: ignore[arg-type] elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = np.asarray(tuples._values) arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrs = zip(*tuples) arrays = cast(List[Sequence[Hashable]], arrs) return cls.from_arrays(arrays, sortorder=sortorder, names=names) def from_product( cls, iterables: Sequence[Iterable[Hashable]], sortorder: int | None = None, names: Sequence[Hashable] | Hashable | lib.NoDefault = lib.no_default, ) -> MultiIndex: """ Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. If not explicitly provided, names will be inferred from the elements of iterables if an element has a name attribute. Returns ------- MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex([(0, 'green'), (0, 'purple'), (1, 'green'), (1, 'purple'), (2, 'green'), (2, 'purple')], names=['number', 'color']) """ from pandas.core.reshape.util import cartesian_product if not is_list_like(iterables): raise TypeError("Input must be a list / sequence of iterables.") if is_iterator(iterables): iterables = list(iterables) codes, levels = factorize_from_iterables(iterables) if names is lib.no_default: names = [getattr(it, "name", None) for it in iterables] # codes are all ndarrays, so cartesian_product is lossless codes = cartesian_product(codes) return cls(levels, codes, sortorder=sortorder, names=names) def from_frame(cls, df: DataFrame, sortorder=None, names=None) -> MultiIndex: """ Make a MultiIndex from a DataFrame. Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. 
Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex([('HI', 'Temp'), ('HI', 'Precip'), ('NJ', 'Temp'), ('NJ', 'Precip')], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex([('HI', 'Temp'), ('HI', 'Precip'), ('NJ', 'Temp'), ('NJ', 'Precip')], names=['state', 'observation']) """ if not isinstance(df, ABCDataFrame): raise TypeError("Input must be a DataFrame") column_names, columns = zip(*df.items()) names = column_names if names is None else names return cls.from_arrays(columns, sortorder=sortorder, names=names) # -------------------------------------------------------------------- def _values(self) -> np.ndarray: # We override here, since our parent uses _data, which we don't use. values = [] for i in range(self.nlevels): index = self.levels[i] codes = self.codes[i] vals = index if is_categorical_dtype(vals.dtype): vals = cast("CategoricalIndex", vals) vals = vals._data._internal_get_values() if isinstance(vals.dtype, ExtensionDtype) or isinstance( vals, (ABCDatetimeIndex, ABCTimedeltaIndex) ): vals = vals.astype(object) vals = np.array(vals, copy=False) vals = algos.take_nd(vals, codes, fill_value=index._na_value) values.append(vals) arr = lib.fast_zip(values) return arr def values(self) -> np.ndarray: return self._values def array(self): """ Raises a ValueError for `MultiIndex` because there's no single array backing a MultiIndex. Raises ------ ValueError """ raise ValueError( "MultiIndex has no single backing array. Use " "'MultiIndex.to_numpy()' to get a NumPy array of tuples." ) def dtypes(self) -> Series: """ Return the dtypes as a Series for the underlying MultiIndex. """ from pandas import Series names = com.fill_missing_names([level.name for level in self.levels]) return Series([level.dtype for level in self.levels], index=Index(names)) def __len__(self) -> int: return len(self.codes[0]) def size(self) -> int: """ Return the number of elements in the underlying data. """ # override Index.size to avoid materializing _values return len(self) # -------------------------------------------------------------------- # Levels Methods def levels(self) -> FrozenList: # Use cache_readonly to ensure that self.get_locs doesn't repeatedly # create new IndexEngine # https://github.com/pandas-dev/pandas/issues/31648 result = [x._rename(name=name) for x, name in zip(self._levels, self._names)] for level in result: # disallow midx.levels[0].name = "foo" level._no_setting_name = True return FrozenList(result) def _set_levels( self, levels, *, level=None, copy: bool = False, validate: bool = True, verify_integrity: bool = False, ) -> None: # This is NOT part of the levels property because it should be # externally not allowed to set levels. 
User beware if you change # _levels directly if validate: if len(levels) == 0: raise ValueError("Must set non-zero number of levels.") if level is None and len(levels) != self.nlevels: raise ValueError("Length of levels must match number of levels.") if level is not None and len(levels) != len(level): raise ValueError("Length of levels must match length of level.") if level is None: new_levels = FrozenList( ensure_index(lev, copy=copy)._view() for lev in levels ) else: level_numbers = [self._get_level_number(lev) for lev in level] new_levels_list = list(self._levels) for lev_num, lev in zip(level_numbers, levels): new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view() new_levels = FrozenList(new_levels_list) if verify_integrity: new_codes = self._verify_integrity(levels=new_levels) self._codes = new_codes names = self.names self._levels = new_levels if any(names): self._set_names(names) self._reset_cache() def set_levels( self, levels, *, level=None, verify_integrity: bool = True ) -> MultiIndex: """ Set new levels on MultiIndex. Defaults to returning new index. Parameters ---------- levels : sequence or list of sequence New level(s) to apply. level : int, level name, or sequence of int/level names (default None) Level(s) to set (None for all levels). verify_integrity : bool, default True If True, checks that levels and codes are compatible. Returns ------- MultiIndex Examples -------- >>> idx = pd.MultiIndex.from_tuples( ... [ ... (1, "one"), ... (1, "two"), ... (2, "one"), ... (2, "two"), ... (3, "one"), ... (3, "two") ... ], ... names=["foo", "bar"] ... ) >>> idx MultiIndex([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two'), (3, 'one'), (3, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a', 'b', 'c'], [1, 2]]) MultiIndex([('a', 1), ('a', 2), ('b', 1), ('b', 2), ('c', 1), ('c', 2)], names=['foo', 'bar']) >>> idx.set_levels(['a', 'b', 'c'], level=0) MultiIndex([('a', 'one'), ('a', 'two'), ('b', 'one'), ('b', 'two'), ('c', 'one'), ('c', 'two')], names=['foo', 'bar']) >>> idx.set_levels(['a', 'b'], level='bar') MultiIndex([(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'), (3, 'a'), (3, 'b')], names=['foo', 'bar']) If any of the levels passed to ``set_levels()`` exceeds the existing length, all of the values from that argument will be stored in the MultiIndex levels, though the values will be truncated in the MultiIndex output. >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]) MultiIndex([('a', 1), ('a', 2), ('b', 1), ('b', 2), ('c', 1), ('c', 2)], names=['foo', 'bar']) >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]]) """ if is_list_like(levels) and not isinstance(levels, Index): levels = list(levels) level, levels = _require_listlike(level, levels, "Levels") idx = self._view() idx._reset_identity() idx._set_levels( levels, level=level, validate=True, verify_integrity=verify_integrity ) return idx def nlevels(self) -> int: """ Integer number of levels in this MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) >>> mi MultiIndex([('a', 'b', 'c')], ) >>> mi.nlevels 3 """ return len(self._levels) def levshape(self) -> Shape: """ A tuple with the length of each level. 
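
        Each entry equals ``len(self.levels[i])``, i.e. the number of unique
        labels in that level, not the number of rows in the index.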
Examples -------- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) >>> mi MultiIndex([('a', 'b', 'c')], ) >>> mi.levshape (1, 1, 1) """ return tuple(len(x) for x in self.levels) # -------------------------------------------------------------------- # Codes Methods def codes(self): return self._codes def _set_codes( self, codes, *, level=None, copy: bool = False, validate: bool = True, verify_integrity: bool = False, ) -> None: if validate: if level is None and len(codes) != self.nlevels: raise ValueError("Length of codes must match number of levels") if level is not None and len(codes) != len(level): raise ValueError("Length of codes must match length of levels.") if level is None: new_codes = FrozenList( _coerce_indexer_frozen(level_codes, lev, copy=copy).view() for lev, level_codes in zip(self._levels, codes) ) else: level_numbers = [self._get_level_number(lev) for lev in level] new_codes_list = list(self._codes) for lev_num, level_codes in zip(level_numbers, codes): lev = self.levels[lev_num] new_codes_list[lev_num] = _coerce_indexer_frozen( level_codes, lev, copy=copy ) new_codes = FrozenList(new_codes_list) if verify_integrity: new_codes = self._verify_integrity(codes=new_codes) self._codes = new_codes self._reset_cache() def set_codes(self, codes, *, level=None, verify_integrity: bool = True): """ Set new codes on MultiIndex. Defaults to returning new index. Parameters ---------- codes : sequence or list of sequence New codes to apply. level : int, level name, or sequence of int/level names (default None) Level(s) to set (None for all levels). verify_integrity : bool, default True If True, checks that levels and codes are compatible. Returns ------- new index (of same type and class...etc) or None The same type as the caller or None if ``inplace=True``. Examples -------- >>> idx = pd.MultiIndex.from_tuples( ... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"] ... ) >>> idx MultiIndex([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]]) MultiIndex([(2, 'one'), (1, 'one'), (2, 'two'), (1, 'two')], names=['foo', 'bar']) >>> idx.set_codes([1, 0, 1, 0], level=0) MultiIndex([(2, 'one'), (1, 'two'), (2, 'one'), (1, 'two')], names=['foo', 'bar']) >>> idx.set_codes([0, 0, 1, 1], level='bar') MultiIndex([(1, 'one'), (1, 'one'), (2, 'two'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1]) MultiIndex([(2, 'one'), (1, 'one'), (2, 'two'), (1, 'two')], names=['foo', 'bar']) """ level, codes = _require_listlike(level, codes, "Codes") idx = self._view() idx._reset_identity() idx._set_codes(codes, level=level, verify_integrity=verify_integrity) return idx # -------------------------------------------------------------------- # Index Internals def _engine(self): # Calculate the number of bits needed to represent labels in each # level, as log2 of their sizes: # NaN values are shifted to 1 and missing values in other while # calculating the indexer are shifted to 0 sizes = np.ceil( np.log2( [ len(level) + libindex.multiindex_nulls_shift # type: ignore[attr-defined] for level in self.levels ] ) ) # Sum bit counts, starting from the _right_.... lev_bits = np.cumsum(sizes[::-1])[::-1] # ... in order to obtain offsets such that sorting the combination of # shifted codes (one for each level, resulting in a unique integer) is # equivalent to sorting lexicographically the codes themselves. 
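# A small worked example of the packing above (sizes are hypothetical,
# for exposition only): with two levels needing 2 and 3 bits,
#     sizes    = [2., 3.]
#     lev_bits = np.cumsum(sizes[::-1])[::-1]          -> [5., 3.]
#     offsets  = np.concatenate([lev_bits[1:], [0]])   -> [3, 0]
# i.e. codes of level 0 are shifted left by 3 bits and codes of level 1
# are left unshifted, so the resulting unsigned integers sort in the
# same order as the original code tuples.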
Notice # that each level needs to be shifted by the number of bits needed to # represent the _previous_ ones: offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64") # Check the total number of bits needed for our representation: if lev_bits[0] > 64: # The levels would overflow a 64 bit uint - use Python integers: return MultiIndexPyIntEngine(self.levels, self.codes, offsets) return MultiIndexUIntEngine(self.levels, self.codes, offsets) # Return type "Callable[..., MultiIndex]" of "_constructor" incompatible with return # type "Type[MultiIndex]" in supertype "Index" def _constructor(self) -> Callable[..., MultiIndex]: # type: ignore[override] return type(self).from_tuples def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex: names = name if name is not lib.no_default else self.names return type(self).from_tuples(values, sortorder=None, names=names) def _view(self) -> MultiIndex: result = type(self)( levels=self.levels, codes=self.codes, sortorder=self.sortorder, names=self.names, verify_integrity=False, ) result._cache = self._cache.copy() result._cache.pop("levels", None) # GH32669 return result # -------------------------------------------------------------------- # error: Signature of "copy" incompatible with supertype "Index" def copy( # type: ignore[override] self, names=None, deep: bool = False, name=None, ): """ Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on new copy. Parameters ---------- names : sequence, optional deep : bool, default False name : Label Kept for compatibility with 1-dimensional Index. Should not be used. Returns ------- MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. 
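Passing ``names`` assigns new level names on the returned copy; a brief
illustration (example values are hypothetical):

>>> mi = pd.MultiIndex.from_arrays([['a'], ['b']])
>>> mi.copy(names=['x', 'y']).names
FrozenList(['x', 'y'])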
Examples -------- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) >>> mi MultiIndex([('a', 'b', 'c')], ) >>> mi.copy() MultiIndex([('a', 'b', 'c')], ) """ names = self._validate_names(name=name, names=names, deep=deep) keep_id = not deep levels, codes = None, None if deep: from copy import deepcopy levels = deepcopy(self.levels) codes = deepcopy(self.codes) levels = levels if levels is not None else self.levels codes = codes if codes is not None else self.codes new_index = type(self)( levels=levels, codes=codes, sortorder=self.sortorder, names=names, verify_integrity=False, ) new_index._cache = self._cache.copy() new_index._cache.pop("levels", None) # GH32669 if keep_id: new_index._id = self._id return new_index def __array__(self, dtype=None) -> np.ndarray: """the array interface, return my values""" return self.values def view(self, cls=None): """this is defined as a copy with the same identity""" result = self.copy() result._id = self._id return result def __contains__(self, key: Any) -> bool: hash(key) try: self.get_loc(key) return True except (LookupError, TypeError, ValueError): return False def dtype(self) -> np.dtype: return np.dtype("O") def _is_memory_usage_qualified(self) -> bool: """return a boolean if we need a qualified .info display""" def f(level) -> bool: return "mixed" in level or "string" in level or "unicode" in level return any(f(level) for level in self._inferred_type_levels) # Cannot determine type of "memory_usage" def memory_usage(self, deep: bool = False) -> int: # we are overwriting our base class to avoid # computing .values here which could materialize # a tuple representation unnecessarily return self._nbytes(deep) def nbytes(self) -> int: """return the number of bytes in the underlying data""" return self._nbytes(False) def _nbytes(self, deep: bool = False) -> int: """ return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is in internal routine* """ # for implementations with no useful getsizeof (PyPy) objsize = 24 level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels) label_nbytes = sum(i.nbytes for i in self.codes) names_nbytes = sum(getsizeof(i, objsize) for i in self.names) result = level_nbytes + label_nbytes + names_nbytes # include our engine hashtable result += self._engine.sizeof(deep=deep) return result # -------------------------------------------------------------------- # Rendering Methods def _formatter_func(self, tup): """ Formats each item in tup according to its level's formatter function. """ formatter_funcs = [level._formatter_func for level in self.levels] return tuple(func(val) for func, val in zip(formatter_funcs, tup)) def _format_native_types( self, *, na_rep: str = "nan", **kwargs ) -> npt.NDArray[np.object_]: new_levels = [] new_codes = [] # go through the levels and format them for level, level_codes in zip(self.levels, self.codes): level_strs = level._format_native_types(na_rep=na_rep, **kwargs) # add nan values, if there are any mask = level_codes == -1 if mask.any(): nan_index = len(level_strs) # numpy 1.21 deprecated implicit string casting level_strs = level_strs.astype(str) level_strs = np.append(level_strs, na_rep) assert not level_codes.flags.writeable # i.e. 
copy is needed level_codes = level_codes.copy() # make writeable level_codes[mask] = nan_index new_levels.append(level_strs) new_codes.append(level_codes) if len(new_levels) == 1: # a single-level multi-index return Index(new_levels[0].take(new_codes[0]))._format_native_types() else: # reconstruct the multi-index mi = MultiIndex( levels=new_levels, codes=new_codes, names=self.names, sortorder=self.sortorder, verify_integrity=False, ) return mi._values def format( self, name: bool | None = None, formatter: Callable | None = None, na_rep: str | None = None, names: bool = False, space: int = 2, sparsify=None, adjoin: bool = True, ) -> list: if name is not None: names = name if len(self) == 0: return [] stringified_levels = [] for lev, level_codes in zip(self.levels, self.codes): na = na_rep if na_rep is not None else _get_na_rep(lev.dtype) if len(lev) > 0: formatted = lev.take(level_codes).format(formatter=formatter) # we have some NA mask = level_codes == -1 if mask.any(): formatted = np.array(formatted, dtype=object) formatted[mask] = na formatted = formatted.tolist() else: # weird all NA case formatted = [ pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n")) for x in algos.take_nd(lev._values, level_codes) ] stringified_levels.append(formatted) result_levels = [] for lev, lev_name in zip(stringified_levels, self.names): level = [] if names: level.append( pprint_thing(lev_name, escape_chars=("\t", "\r", "\n")) if lev_name is not None else "" ) level.extend(np.array(lev, dtype=object)) result_levels.append(level) if sparsify is None: sparsify = get_option("display.multi_sparse") if sparsify: sentinel: Literal[""] | bool | lib.NoDefault = "" # GH3547 use value of sparsify as sentinel if it's "Falsey" assert isinstance(sparsify, bool) or sparsify is lib.no_default if sparsify in [False, lib.no_default]: sentinel = sparsify # little bit of a kludge job for #1217 result_levels = sparsify_labels( result_levels, start=int(names), sentinel=sentinel ) if adjoin: from pandas.io.formats.format import get_adjustment adj = get_adjustment() return adj.adjoin(space, *result_levels).split("\n") else: return result_levels # -------------------------------------------------------------------- # Names Methods def _get_names(self) -> FrozenList: return FrozenList(self._names) def _set_names(self, names, *, level=None, validate: bool = True): """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None validate : bool, default True validate that the names match level lengths Raises ------ TypeError if each name is not hashable. Notes ----- sets names on levels. WARNING: mutates! Note that you generally want to set this *after* changing levels, so that it only acts on copies """ # GH 15110 # Don't allow a single string for names in a MultiIndex if names is not None and not is_list_like(names): raise ValueError("Names should be list-like for a MultiIndex") names = list(names) if validate: if level is not None and len(names) != len(level): raise ValueError("Length of names must match length of level.") if level is None and len(names) != self.nlevels: raise ValueError( "Length of names must match number of levels in MultiIndex." 
) if level is None: level = range(self.nlevels) else: level = [self._get_level_number(lev) for lev in level] # set the name for lev, name in zip(level, names): if name is not None: # GH 20527 # All items in 'names' need to be hashable: if not is_hashable(name): raise TypeError( f"{type(self).__name__}.name must be a hashable type" ) self._names[lev] = name # If .levels has been accessed, the names in our cache will be stale. self._reset_cache() names = property( fset=_set_names, fget=_get_names, doc=""" Names of levels in MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.names FrozenList(['x', 'y', 'z']) """, ) # -------------------------------------------------------------------- def inferred_type(self) -> str: return "mixed" def _get_level_number(self, level) -> int: count = self.names.count(level) if (count > 1) and not is_integer(level): raise ValueError( f"The name {level} occurs multiple times, use a level number" ) try: level = self.names.index(level) except ValueError as err: if not is_integer(level): raise KeyError(f"Level {level} not found") from err if level < 0: level += self.nlevels if level < 0: orig_level = level - self.nlevels raise IndexError( f"Too many levels: Index has only {self.nlevels} levels, " f"{orig_level} is not a valid level number" ) from err # Note: levels are zero-based elif level >= self.nlevels: raise IndexError( f"Too many levels: Index has only {self.nlevels} levels, " f"not {level + 1}" ) from err return level def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. """ if any(-1 in code for code in self.codes): return False if all(level.is_monotonic_increasing for level in self.levels): # If each level is sorted, we can operate on the codes directly. GH27495 return libalgos.is_lexsorted( [x.astype("int64", copy=False) for x in self.codes] ) # reversed() because lexsort() wants the most significant key last. values = [ self._get_level_values(i)._values for i in reversed(range(len(self.levels))) ] try: # error: Argument 1 to "lexsort" has incompatible type # "List[Union[ExtensionArray, ndarray[Any, Any]]]"; # expected "Union[_SupportsArray[dtype[Any]], # _NestedSequence[_SupportsArray[dtype[Any]]], bool, # int, float, complex, str, bytes, _NestedSequence[Union # [bool, int, float, complex, str, bytes]]]" sort_order = np.lexsort(values) # type: ignore[arg-type] return Index(sort_order).is_monotonic_increasing except TypeError: # we have mixed types and np.lexsort is not happy return Index(self._values).is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. 
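A brief illustration (example values are hypothetical):

>>> mi = pd.MultiIndex.from_arrays([[2, 2, 1], ['b', 'a', 'a']])
>>> mi.is_monotonic_decreasing
True
>>> mi.is_monotonic_increasing
False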
""" # monotonic decreasing if and only if reverse is monotonic increasing return self[::-1].is_monotonic_increasing def _inferred_type_levels(self) -> list[str]: """return a list of the inferred types, one for each level""" return [i.inferred_type for i in self.levels] def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: shape = tuple(len(lev) for lev in self.levels) ids = get_group_index(self.codes, shape, sort=False, xnull=False) return duplicated(ids, keep) # error: Cannot override final attribute "_duplicated" # (previously declared in base class "IndexOpsMixin") _duplicated = duplicated # type: ignore[misc] def fillna(self, value=None, downcast=None): """ fillna is not implemented for MultiIndex """ raise NotImplementedError("isna is not defined for MultiIndex") def dropna(self, how: AnyAll = "any") -> MultiIndex: nans = [level_codes == -1 for level_codes in self.codes] if how == "any": indexer = np.any(nans, axis=0) elif how == "all": indexer = np.all(nans, axis=0) else: raise ValueError(f"invalid how option: {how}") new_codes = [level_codes[~indexer] for level_codes in self.codes] return self.set_codes(codes=new_codes) def _get_level_values(self, level: int, unique: bool = False) -> Index: """ Return vector of label values for requested level, equal to the length of the index **this is an internal method** Parameters ---------- level : int unique : bool, default False if True, drop duplicated values Returns ------- Index """ lev = self.levels[level] level_codes = self.codes[level] name = self._names[level] if unique: level_codes = algos.unique(level_codes) filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value) return lev._shallow_copy(filled, name=name) def get_level_values(self, level): """ Return vector of label values for requested level. Length of returned vector is equal to the length of the index. Parameters ---------- level : int or str ``level`` is either the integer position of the level in the MultiIndex, or the name of the level. Returns ------- Index Values is a level of this MultiIndex converted to a single :class:`Index` (or subclass thereof). Notes ----- If the level contains missing values, the result may be casted to ``float`` with missing values specified as ``NaN``. This is because the level is converted to a regular ``Index``. Examples -------- Create a MultiIndex: >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def'))) >>> mi.names = ['level_1', 'level_2'] Get level values by supplying level as either integer or name: >>> mi.get_level_values(0) Index(['a', 'b', 'c'], dtype='object', name='level_1') >>> mi.get_level_values('level_2') Index(['d', 'e', 'f'], dtype='object', name='level_2') If a level contains missing values, the return type of the level may be cast to ``float``. >>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).dtypes level_0 int64 level_1 int64 dtype: object >>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).get_level_values(0) Index([1.0, nan, 2.0], dtype='float64') """ level = self._get_level_number(level) values = self._get_level_values(level) return values def unique(self, level=None): if level is None: return self.drop_duplicates() else: level = self._get_level_number(level) return self._get_level_values(level=level, unique=True) def to_frame( self, index: bool = True, name=lib.no_default, allow_duplicates: bool = False, ) -> DataFrame: """ Create a DataFrame with the levels of the MultiIndex as columns. Column ordering is determined by the DataFrame constructor with data as a dict. 
Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original MultiIndex. name : list / sequence of str, optional The passed names should substitute index level names. allow_duplicates : bool, optional default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- DataFrame See Also -------- DataFrame : Two-dimensional, size-mutable, potentially heterogeneous tabular data. Examples -------- >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']]) >>> mi MultiIndex([('a', 'c'), ('b', 'd')], ) >>> df = mi.to_frame() >>> df 0 1 a c a c b d b d >>> df = mi.to_frame(index=False) >>> df 0 1 0 a c 1 b d >>> df = mi.to_frame(name=['x', 'y']) >>> df x y a c a c b d b d """ from pandas import DataFrame if name is not lib.no_default: if not is_list_like(name): raise TypeError("'name' must be a list / sequence of column names.") if len(name) != len(self.levels): raise ValueError( "'name' should have same length as number of levels on index." ) idx_names = name else: idx_names = self._get_level_names() if not allow_duplicates and len(set(idx_names)) != len(idx_names): raise ValueError( "Cannot create duplicate column labels if allow_duplicates is False" ) # Guarantee resulting column order - PY36+ dict maintains insertion order result = DataFrame( {level: self._get_level_values(level) for level in range(len(self.levels))}, copy=False, ) result.columns = idx_names if index: result.index = self return result # error: Return type "Index" of "to_flat_index" incompatible with return type # "MultiIndex" in supertype "Index" def to_flat_index(self) -> Index: # type: ignore[override] """ Convert a MultiIndex to an Index of Tuples containing the level values. Returns ------- pd.Index Index with the MultiIndex data represented in Tuples. See Also -------- MultiIndex.from_tuples : Convert flat index back to MultiIndex. Notes ----- This method will simply return the caller if called by anything other than a MultiIndex. Examples -------- >>> index = pd.MultiIndex.from_product( ... [['foo', 'bar'], ['baz', 'qux']], ... names=['a', 'b']) >>> index.to_flat_index() Index([('foo', 'baz'), ('foo', 'qux'), ('bar', 'baz'), ('bar', 'qux')], dtype='object') """ return Index(self._values, tupleize_cols=False) def _is_lexsorted(self) -> bool: """ Return True if the codes are lexicographically sorted. Returns ------- bool Examples -------- In the below examples, the first level of the MultiIndex is sorted because a<b<c, so there is no need to look at the next level. >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ... ['d', 'e', 'f']])._is_lexsorted() True >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ... ['d', 'f', 'e']])._is_lexsorted() True In case there is a tie, the lexicographical sorting looks at the next level of the MultiIndex. >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']])._is_lexsorted() True >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']])._is_lexsorted() False >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], ... ['aa', 'bb', 'aa', 'bb']])._is_lexsorted() True >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], ... 
['bb', 'aa', 'aa', 'bb']])._is_lexsorted() False """ return self._lexsort_depth == self.nlevels def _lexsort_depth(self) -> int: """ Compute and return the lexsort_depth, the number of levels of the MultiIndex that are sorted lexically Returns ------- int """ if self.sortorder is not None: return self.sortorder return _lexsort_depth(self.codes, self.nlevels) def _sort_levels_monotonic(self, raise_if_incomparable: bool = False) -> MultiIndex: """ This is an *internal* function. Create a new MultiIndex from the current to monotonically sorted items IN the levels. This does not actually make the entire MultiIndex monotonic, JUST the levels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. Returns ------- MultiIndex Examples -------- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex([('a', 'bb'), ('a', 'aa'), ('b', 'bb'), ('b', 'aa')], ) >>> mi.sort_values() MultiIndex([('a', 'aa'), ('a', 'bb'), ('b', 'aa'), ('b', 'bb')], ) """ if self._is_lexsorted() and self.is_monotonic_increasing: return self new_levels = [] new_codes = [] for lev, level_codes in zip(self.levels, self.codes): if not lev.is_monotonic_increasing: try: # indexer to reorder the levels indexer = lev.argsort() except TypeError: if raise_if_incomparable: raise else: lev = lev.take(indexer) # indexer to reorder the level codes indexer = ensure_platform_int(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) level_codes = algos.take_nd(ri, level_codes) new_levels.append(lev) new_codes.append(level_codes) return MultiIndex( new_levels, new_codes, names=self.names, sortorder=self.sortorder, verify_integrity=False, ) def remove_unused_levels(self) -> MultiIndex: """ Create new MultiIndex from current that removes unused levels. Unused level(s) means levels that are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. Returns ------- MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_product([range(2), list('ab')]) >>> mi MultiIndex([(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')], ) >>> mi[2:] MultiIndex([(1, 'a'), (1, 'b')], ) The 0 from the first level is not represented and can be removed >>> mi2 = mi[2:].remove_unused_levels() >>> mi2.levels FrozenList([[1], ['a', 'b']]) """ new_levels = [] new_codes = [] changed = False for lev, level_codes in zip(self.levels, self.codes): # Since few levels are typically unused, bincount() is more # efficient than unique() - however it only accepts positive values # (and drops order): uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1 has_na = int(len(uniques) and (uniques[0] == -1)) if len(uniques) != len(lev) + has_na: if lev.isna().any() and len(uniques) == len(lev): break # We have unused levels changed = True # Recalculate uniques, now preserving order. # Can easily be cythonized by exploiting the already existing # "uniques" and stop parsing "level_codes" when all items # are found: uniques = algos.unique(level_codes) if has_na: na_idx = np.where(uniques == -1)[0] # Just ensure that -1 is in first position: uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] # codes get mapped from uniques to 0:len(uniques) # -1 (if present) is mapped to last position code_mapping = np.zeros(len(lev) + has_na) # ... 
and reassigned value -1: code_mapping[uniques] = np.arange(len(uniques)) - has_na level_codes = code_mapping[level_codes] # new levels are simple lev = lev.take(uniques[has_na:]) new_levels.append(lev) new_codes.append(level_codes) result = self.view() if changed: result._reset_identity() result._set_levels(new_levels, validate=False) result._set_codes(new_codes, validate=False) return result # -------------------------------------------------------------------- # Pickling Methods def __reduce__(self): """Necessary for making this object picklable""" d = { "levels": list(self.levels), "codes": list(self.codes), "sortorder": self.sortorder, "names": list(self.names), } return ibase._new_Index, (type(self), d), None # -------------------------------------------------------------------- def __getitem__(self, key): if is_scalar(key): key = com.cast_scalar_indexer(key) retval = [] for lev, level_codes in zip(self.levels, self.codes): if level_codes[key] == -1: retval.append(np.nan) else: retval.append(lev[level_codes[key]]) return tuple(retval) else: # in general cannot be sure whether the result will be sorted sortorder = None if com.is_bool_indexer(key): key = np.asarray(key, dtype=bool) sortorder = self.sortorder elif isinstance(key, slice): if key.step is None or key.step > 0: sortorder = self.sortorder elif isinstance(key, Index): key = np.asarray(key) new_codes = [level_codes[key] for level_codes in self.codes] return MultiIndex( levels=self.levels, codes=new_codes, names=self.names, sortorder=sortorder, verify_integrity=False, ) def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex: """ Fastpath for __getitem__ when we know we have a slice. """ sortorder = None if slobj.step is None or slobj.step > 0: sortorder = self.sortorder new_codes = [level_codes[slobj] for level_codes in self.codes] return type(self)( levels=self.levels, codes=new_codes, names=self._names, sortorder=sortorder, verify_integrity=False, ) def take( self: MultiIndex, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ) -> MultiIndex: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) # only fill if we are passing a non-None fill_value allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) na_value = -1 taken = [lab.take(indices) for lab in self.codes] if allow_fill: mask = indices == -1 if mask.any(): masked = [] for new_label in taken: label_values = new_label label_values[mask] = na_value masked.append(np.asarray(label_values)) taken = masked return MultiIndex( levels=self.levels, codes=taken, names=self.names, verify_integrity=False ) def append(self, other): """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index The combined index. 
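When the level names of the appended indexes disagree, the name for that
level is dropped in the result; a brief illustration (input values are
hypothetical):

>>> mi1 = pd.MultiIndex.from_arrays([['a'], [1]], names=['x', 'y'])
>>> mi2 = pd.MultiIndex.from_arrays([['b'], [2]], names=['x', 'z'])
>>> mi1.append(mi2).names
FrozenList(['x', None])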
Examples -------- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b']]) >>> mi MultiIndex([('a', 'b')], ) >>> mi.append(mi) MultiIndex([('a', 'b'), ('a', 'b')], ) """ if not isinstance(other, (list, tuple)): other = [other] if all( (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other ): arrays, names = [], [] for i in range(self.nlevels): label = self._get_level_values(i) appended = [o._get_level_values(i) for o in other] arrays.append(label.append(appended)) single_label_name = all(label.name == x.name for x in appended) names.append(label.name if single_label_name else None) return MultiIndex.from_arrays(arrays, names=names) to_concat = (self._values,) + tuple(k._values for k in other) new_tuples = np.concatenate(to_concat) # if all(isinstance(x, MultiIndex) for x in other): try: # We only get here if other contains at least one index with tuples, # setting names to None automatically return MultiIndex.from_tuples(new_tuples) except (TypeError, IndexError): return Index(new_tuples) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: if len(args) == 0 and len(kwargs) == 0: # lexsort is significantly faster than self._values.argsort() target = self._sort_levels_monotonic(raise_if_incomparable=True) return lexsort_indexer(target._get_codes_for_sorting()) return self._values.argsort(*args, **kwargs) def repeat(self, repeats: int, axis=None) -> MultiIndex: nv.validate_repeat((), {"axis": axis}) # error: Incompatible types in assignment (expression has type "ndarray", # variable has type "int") repeats = ensure_platform_int(repeats) # type: ignore[assignment] return MultiIndex( levels=self.levels, codes=[ level_codes.view(np.ndarray).astype(np.intp, copy=False).repeat(repeats) for level_codes in self.codes ], names=self.names, sortorder=self.sortorder, verify_integrity=False, ) # error: Signature of "drop" incompatible with supertype "Index" def drop( # type: ignore[override] self, codes, level: Index | np.ndarray | Iterable[Hashable] | None = None, errors: IgnoreRaise = "raise", ) -> MultiIndex: """ Make new MultiIndex with passed list of codes deleted. Parameters ---------- codes : array-like Must be a list of tuples when level is not specified. level : int or level name, default None errors : str, default 'raise' Returns ------- MultiIndex """ if level is not None: return self._drop_from_level(codes, level, errors) if not isinstance(codes, (np.ndarray, Index)): try: codes = com.index_labels_to_array(codes, dtype=np.dtype("object")) except ValueError: pass inds = [] for level_codes in codes: try: loc = self.get_loc(level_codes) # get_loc returns either an integer, a slice, or a boolean # mask if isinstance(loc, int): inds.append(loc) elif isinstance(loc, slice): step = loc.step if loc.step is not None else 1 inds.extend(range(loc.start, loc.stop, step)) elif com.is_bool_indexer(loc): if self._lexsort_depth == 0: warnings.warn( "dropping on a non-lexsorted multi-index " "without a level parameter may impact performance.", PerformanceWarning, stacklevel=find_stack_level(), ) loc = loc.nonzero()[0] inds.extend(loc) else: msg = f"unsupported indexer of type {type(loc)}" raise AssertionError(msg) except KeyError: if errors != "ignore": raise return self.delete(inds) def _drop_from_level( self, codes, level, errors: IgnoreRaise = "raise" ) -> MultiIndex: codes = com.index_labels_to_array(codes) i = self._get_level_number(level) index = self.levels[i] values = index.get_indexer(codes) # If nan should be dropped it will equal -1 here. 
We have to check which values # are not nan and equal -1, this means they are missing in the index nan_codes = isna(codes) values[(np.equal(nan_codes, False)) & (values == -1)] = -2 if index.shape[0] == self.shape[0]: values[np.equal(nan_codes, True)] = -2 not_found = codes[values == -2] if len(not_found) != 0 and errors != "ignore": raise KeyError(f"labels {not_found} not found in level") mask = ~algos.isin(self.codes[i], values) return self[mask] def swaplevel(self, i=-2, j=-1) -> MultiIndex: """ Swap level i with level j. Calling this method does not change the ordering of the values. Parameters ---------- i : int, str, default -2 First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str, default -1 Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A new MultiIndex. See Also -------- Series.swaplevel : Swap levels i and j in a MultiIndex. DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a particular axis. Examples -------- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex([('a', 'bb'), ('a', 'aa'), ('b', 'bb'), ('b', 'aa')], ) >>> mi.swaplevel(0, 1) MultiIndex([('bb', 'a'), ('aa', 'a'), ('bb', 'b'), ('aa', 'b')], ) """ new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) i = self._get_level_number(i) j = self._get_level_number(j) new_levels[i], new_levels[j] = new_levels[j], new_levels[i] new_codes[i], new_codes[j] = new_codes[j], new_codes[i] new_names[i], new_names[j] = new_names[j], new_names[i] return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False ) def reorder_levels(self, order) -> MultiIndex: """ Rearrange levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). Returns ------- MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y']) >>> mi MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.reorder_levels(order=[1, 0]) MultiIndex([(3, 1), (4, 2)], names=['y', 'x']) >>> mi.reorder_levels(order=['y', 'x']) MultiIndex([(3, 1), (4, 2)], names=['y', 'x']) """ order = [self._get_level_number(i) for i in order] if len(order) != self.nlevels: raise AssertionError( f"Length of order must be same as number of levels ({self.nlevels}), " f"got {len(order)}" ) new_levels = [self.levels[i] for i in order] new_codes = [self.codes[i] for i in order] new_names = [self.names[i] for i in order] return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False ) def _get_codes_for_sorting(self) -> list[Categorical]: """ we are categorizing our codes by using the available categories (all, not just observed) excluding any missing ones (-1); this is in preparation for sorting, where we need to disambiguate that -1 is not a valid valid """ def cats(level_codes): return np.arange( np.array(level_codes).max() + 1 if len(level_codes) else 0, dtype=level_codes.dtype, ) return [ Categorical.from_codes(level_codes, cats(level_codes), ordered=True) for level_codes in self.codes ] def sortlevel( self, level: IndexLabel = 0, ascending: bool | list[bool] = True, sort_remaining: bool = True, ) -> tuple[MultiIndex, npt.NDArray[np.intp]]: """ Sort MultiIndex at the requested level. 
The result will respect the original ordering of the associated factor at that level. Parameters ---------- level : list-like, int or str, default 0 If a string is given, must be a name of the level. If list-like must be names or ints of levels. ascending : bool, default True False to sort in descending order. Can also be a list to specify a directed ordering. sort_remaining : sort by the remaining levels after level Returns ------- sorted_index : pd.MultiIndex Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. Examples -------- >>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]]) >>> mi MultiIndex([(0, 2), (0, 1)], ) >>> mi.sortlevel() (MultiIndex([(0, 1), (0, 2)], ), array([1, 0])) >>> mi.sortlevel(sort_remaining=False) (MultiIndex([(0, 2), (0, 1)], ), array([0, 1])) >>> mi.sortlevel(1) (MultiIndex([(0, 1), (0, 2)], ), array([1, 0])) >>> mi.sortlevel(1, ascending=False) (MultiIndex([(0, 2), (0, 1)], ), array([0, 1])) """ if not is_list_like(level): level = [level] # error: Item "Hashable" of "Union[Hashable, Sequence[Hashable]]" has # no attribute "__iter__" (not iterable) level = [ self._get_level_number(lev) for lev in level # type: ignore[union-attr] ] sortorder = None # we have a directed ordering via ascending if isinstance(ascending, list): if not len(level) == len(ascending): raise ValueError("level must have same length as ascending") indexer = lexsort_indexer( [self.codes[lev] for lev in level], orders=ascending ) # level ordering else: codes = list(self.codes) shape = list(self.levshape) # partition codes and shape primary = tuple(codes[lev] for lev in level) primshp = tuple(shape[lev] for lev in level) # Reverse sorted to retain the order of # smaller indices that needs to be removed for lev in sorted(level, reverse=True): codes.pop(lev) shape.pop(lev) if sort_remaining: primary += primary + tuple(codes) primshp += primshp + tuple(shape) else: sortorder = level[0] indexer = indexer_from_factorized(primary, primshp, compress=False) if not ascending: indexer = indexer[::-1] indexer = ensure_platform_int(indexer) new_codes = [level_codes.take(indexer) for level_codes in self.codes] new_index = MultiIndex( codes=new_codes, levels=self.levels, names=self.names, sortorder=sortorder, verify_integrity=False, ) return new_index, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): if not isinstance(target, MultiIndex): if indexer is None: target = self elif (indexer >= 0).all(): target = self.take(indexer) else: try: target = MultiIndex.from_tuples(target) except TypeError: # not all tuples, see test_constructor_dict_multiindex_reindex_flat return target target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool) -> Index: if ( preserve_names and target.nlevels == self.nlevels and target.names != self.names ): target = target.copy(deep=False) target.names = self.names return target # -------------------------------------------------------------------- # Indexing Methods def _check_indexing_error(self, key) -> None: if not is_hashable(key) or is_iterator(key): # We allow tuples if they are hashable, whereas other Index # subclasses require scalar. # We have to explicitly exclude generators, as these are hashable. raise InvalidIndexError(key) def _should_fallback_to_positional(self) -> bool: """ Should integer key(s) be treated as positional? 
""" # GH#33355 return self.levels[0]._should_fallback_to_positional def _get_indexer_strict( self, key, axis_name: str ) -> tuple[Index, npt.NDArray[np.intp]]: keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if len(keyarr) and not isinstance(keyarr[0], tuple): indexer = self._get_indexer_level_0(keyarr) self._raise_if_missing(key, indexer, axis_name) return self[indexer], indexer return super()._get_indexer_strict(key, axis_name) def _raise_if_missing(self, key, indexer, axis_name: str) -> None: keyarr = key if not isinstance(key, Index): keyarr = com.asarray_tuplesafe(key) if len(keyarr) and not isinstance(keyarr[0], tuple): # i.e. same condition for special case in MultiIndex._get_indexer_strict mask = indexer == -1 if mask.any(): check = self.levels[0].get_indexer(keyarr) cmask = check == -1 if cmask.any(): raise KeyError(f"{keyarr[cmask]} not in index") # We get here when levels still contain values which are not # actually in Index anymore raise KeyError(f"{keyarr} not in index") else: return super()._raise_if_missing(key, indexer, axis_name) def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]: """ Optimized equivalent to `self.get_level_values(0).get_indexer_for(target)`. """ lev = self.levels[0] codes = self._codes[0] cat = Categorical.from_codes(codes=codes, categories=lev) ci = Index(cat) return ci.get_indexer_for(target) def get_slice_bound( self, label: Hashable | Sequence[Hashable], side: Literal["left", "right"], ) -> int: """ For an ordered MultiIndex, compute slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if `side=='right') position of given label. Parameters ---------- label : object or tuple of objects side : {'left', 'right'} Returns ------- int Index of label. Notes ----- This method only works if level 0 index of the MultiIndex is lexsorted. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')]) Get the locations from the leftmost 'b' in the first level until the end of the multiindex: >>> mi.get_slice_bound('b', side="left") 1 Like above, but if you get the locations from the rightmost 'b' in the first level and 'f' in the second level: >>> mi.get_slice_bound(('b','f'), side="right") 3 See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ if not isinstance(label, tuple): label = (label,) return self._partial_tup_index(label, side=side) # pylint: disable-next=useless-parent-delegation def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ For an ordered MultiIndex, compute the slice locations for input labels. The input labels can be tuples representing partial levels, e.g. for a MultiIndex with 3 levels, you can pass a single value (corresponding to the first level), or a 1-, 2-, or 3-tuple. Parameters ---------- start : label or tuple, default None If None, defaults to the beginning end : label or tuple If None, defaults to the end step : int or None Slice step Returns ------- (start, end) : (int, int) Notes ----- This method only works if the MultiIndex is properly lexsorted. So, if only the first 2 levels of a 3-level MultiIndex are lexsorted, you can only pass two levels to ``.slice_locs``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')], ... 
names=['A', 'B']) Get the slice locations from the beginning of 'b' in the first level until the end of the multiindex: >>> mi.slice_locs(start='b') (1, 4) Like above, but stop at the end of 'b' in the first level and 'f' in the second level: >>> mi.slice_locs(start='b', end=('b', 'f')) (1, 3) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ # This function adds nothing to its parent implementation (the magic # happens in get_slice_bound method), but it adds meaningful doc. return super().slice_locs(start, end, step) def _partial_tup_index(self, tup: tuple, side: Literal["left", "right"] = "left"): if len(tup) > self._lexsort_depth: raise UnsortedIndexError( f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth " f"({self._lexsort_depth})" ) n = len(tup) start, end = 0, len(self) zipped = zip(tup, self.levels, self.codes) for k, (lab, lev, level_codes) in enumerate(zipped): section = level_codes[start:end] if lab not in lev and not isna(lab): # short circuit try: loc = algos.searchsorted(lev, lab, side=side) except TypeError as err: # non-comparable e.g. test_slice_locs_with_type_mismatch raise TypeError(f"Level type mismatch: {lab}") from err if not is_integer(loc): # non-comparable level, e.g. test_groupby_example raise TypeError(f"Level type mismatch: {lab}") if side == "right" and loc >= 0: loc -= 1 return start + algos.searchsorted(section, loc, side=side) idx = self._get_loc_single_level_index(lev, lab) if isinstance(idx, slice) and k < n - 1: # Get start and end value from slice, necessary when a non-integer # interval is given as input GH#37707 start = idx.start end = idx.stop elif k < n - 1: # error: Incompatible types in assignment (expression has type # "Union[ndarray[Any, dtype[signedinteger[Any]]] end = start + algos.searchsorted( # type: ignore[assignment] section, idx, side="right" ) # error: Incompatible types in assignment (expression has type # "Union[ndarray[Any, dtype[signedinteger[Any]]] start = start + algos.searchsorted( # type: ignore[assignment] section, idx, side="left" ) elif isinstance(idx, slice): idx = idx.start return start + algos.searchsorted(section, idx, side=side) else: return start + algos.searchsorted(section, idx, side=side) def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int: """ If key is NA value, location of index unify as -1. Parameters ---------- level_index: Index key : label Returns ------- loc : int If key is NA value, loc is -1 Else, location of key in index. See Also -------- Index.get_loc : The get_loc method for (single-level) index. """ if is_scalar(key) and isna(key): # TODO: need is_valid_na_for_dtype(key, level_index.dtype) return -1 else: return level_index.get_loc(key) def get_loc(self, key): """ Get location for a label or a tuple of labels. The location is returned as an integer/slice or boolean mask. Parameters ---------- key : label or tuple of labels (one for each level) Returns ------- int, slice object or boolean mask If the key is past the lexsort depth, the return may be a boolean mask array, otherwise it is always a slice or int. See Also -------- Index.get_loc : The get_loc method for (single-level) index. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. 
Notes ----- The key cannot be a slice, list of same-level labels, a boolean mask, or a sequence of such. If you want to use those, use :meth:`MultiIndex.get_locs` instead. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_loc('b') slice(1, 3, None) >>> mi.get_loc(('b', 'e')) 1 """ self._check_indexing_error(key) def _maybe_to_slice(loc): """convert integer indexer to boolean mask or slice if possible""" if not isinstance(loc, np.ndarray) or loc.dtype != np.intp: return loc loc = lib.maybe_indices_to_slice(loc, len(self)) if isinstance(loc, slice): return loc mask = np.empty(len(self), dtype="bool") mask.fill(False) mask[loc] = True return mask if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) keylen = len(key) if self.nlevels < keylen: raise KeyError( f"Key length ({keylen}) exceeds index depth ({self.nlevels})" ) if keylen == self.nlevels and self.is_unique: # TODO: what if we have an IntervalIndex level? # i.e. do we need _index_as_unique on that level? try: return self._engine.get_loc(key) except TypeError: # e.g. test_partial_slicing_with_multiindex partial string slicing loc, _ = self.get_loc_level(key, list(range(self.nlevels))) return loc # -- partial selection or non-unique index # break the key into 2 parts based on the lexsort_depth of the index; # the first part returns a continuous slice of the index; the 2nd part # needs linear search within the slice i = self._lexsort_depth lead_key, follow_key = key[:i], key[i:] if not lead_key: start = 0 stop = len(self) else: try: start, stop = self.slice_locs(lead_key, lead_key) except TypeError as err: # e.g. test_groupby_example key = ((0, 0, 1, 2), "new_col") # when self has 5 integer levels raise KeyError(key) from err if start == stop: raise KeyError(key) if not follow_key: return slice(start, stop) warnings.warn( "indexing past lexsort depth may impact performance.", PerformanceWarning, stacklevel=find_stack_level(), ) loc = np.arange(start, stop, dtype=np.intp) for i, k in enumerate(follow_key, len(lead_key)): mask = self.codes[i][loc] == self._get_loc_single_level_index( self.levels[i], k ) if not mask.all(): loc = loc[mask] if not len(loc): raise KeyError(key) return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop) def get_loc_level(self, key, level: IndexLabel = 0, drop_level: bool = True): """ Get location and sliced index for requested label(s)/level(s). Parameters ---------- key : label or sequence of labels level : int/level name or list thereof, optional drop_level : bool, default True If ``False``, the resulting index will not drop any level. Returns ------- tuple A 2-tuple where the elements : Element 0: int, slice object or boolean array. Element 1: The resulting sliced multiindex/index. If the key contains all levels, this will be ``None``. See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')], ... 
names=['A', 'B']) >>> mi.get_loc_level('b') (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B')) >>> mi.get_loc_level('e', level='B') (array([False, True, False]), Index(['b'], dtype='object', name='A')) >>> mi.get_loc_level(['b', 'e']) (1, None) """ if not isinstance(level, (list, tuple)): level = self._get_level_number(level) else: level = [self._get_level_number(lev) for lev in level] loc, mi = self._get_loc_level(key, level=level) if not drop_level: if lib.is_integer(loc): mi = self[loc : loc + 1] else: mi = self[loc] return loc, mi def _get_loc_level(self, key, level: int | list[int] = 0): """ get_loc_level but with `level` known to be positional, not name-based. """ # different name to distinguish from maybe_droplevels def maybe_mi_droplevels(indexer, levels): """ If level does not exist or all levels were dropped, the exception has to be handled outside. """ new_index = self[indexer] for i in sorted(levels, reverse=True): new_index = new_index._drop_level_numbers([i]) return new_index if isinstance(level, (tuple, list)): if len(key) != len(level): raise AssertionError( "Key for location must have same length as number of levels" ) result = None for lev, k in zip(level, key): loc, new_index = self._get_loc_level(k, level=lev) if isinstance(loc, slice): mask = np.zeros(len(self), dtype=bool) mask[loc] = True loc = mask result = loc if result is None else result & loc try: # FIXME: we should be only dropping levels on which we are # scalar-indexing mi = maybe_mi_droplevels(result, level) except ValueError: # droplevel failed because we tried to drop all levels, # i.e. len(level) == self.nlevels mi = self[result] return result, mi # kludge for #1796 if isinstance(key, list): key = tuple(key) if isinstance(key, tuple) and level == 0: try: # Check if this tuple is a single key in our first level if key in self.levels[0]: indexer = self._get_level_indexer(key, level=level) new_index = maybe_mi_droplevels(indexer, [0]) return indexer, new_index except (TypeError, InvalidIndexError): pass if not any(isinstance(k, slice) for k in key): if len(key) == self.nlevels and self.is_unique: # Complete key in unique index -> standard get_loc try: return (self._engine.get_loc(key), None) except KeyError as err: raise KeyError(key) from err except TypeError: # e.g. partial string indexing # test_partial_string_timestamp_multiindex pass # partial selection indexer = self.get_loc(key) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] if len(ilevels) == self.nlevels: if is_integer(indexer): # we are dropping all levels return indexer, None # TODO: in some cases we still need to drop some levels, # e.g. test_multiindex_perf_warn # test_partial_string_timestamp_multiindex ilevels = [ i for i in range(len(key)) if ( not isinstance(key[i], str) or not self.levels[i]._supports_partial_string_indexing ) and key[i] != slice(None, None) ] if len(ilevels) == self.nlevels: # TODO: why? ilevels = [] return indexer, maybe_mi_droplevels(indexer, ilevels) else: indexer = None for i, k in enumerate(key): if not isinstance(k, slice): loc_level = self._get_level_indexer(k, level=i) if isinstance(loc_level, slice): if com.is_null_slice(loc_level) or com.is_full_slice( loc_level, len(self) ): # everything continue # e.g. 
test_xs_IndexSlice_argument_not_implemented k_index = np.zeros(len(self), dtype=bool) k_index[loc_level] = True else: k_index = loc_level elif com.is_null_slice(k): # taking everything, does not affect `indexer` below continue else: # FIXME: this message can be inaccurate, e.g. # test_series_varied_multiindex_alignment raise TypeError(f"Expected label or tuple of labels, got {key}") if indexer is None: indexer = k_index else: indexer &= k_index if indexer is None: indexer = slice(None, None) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return indexer, maybe_mi_droplevels(indexer, ilevels) else: indexer = self._get_level_indexer(key, level=level) if ( isinstance(key, str) and self.levels[level]._supports_partial_string_indexing ): # check to see if we did an exact lookup vs sliced check = self.levels[level].get_loc(key) if not is_integer(check): # e.g. test_partial_string_timestamp_multiindex return indexer, self[indexer] try: result_index = maybe_mi_droplevels(indexer, [level]) except ValueError: result_index = self[indexer] return indexer, result_index def _get_level_indexer( self, key, level: int = 0, indexer: npt.NDArray[np.bool_] | None = None ): # `level` kwarg is _always_ positional, never name # return a boolean array or slice showing where the key is # in the totality of values # if the indexer is provided, then use this level_index = self.levels[level] level_codes = self.codes[level] def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes): # Compute a bool indexer to identify the positions to take. # If we have an existing indexer, we only need to examine the # subset of positions where the existing indexer is True. if indexer is not None: # we only need to look at the subset of codes where the # existing indexer equals True codes = codes[indexer] if step is None or step == 1: new_indexer = (codes >= start) & (codes < stop) else: r = np.arange(start, stop, step, dtype=codes.dtype) new_indexer = algos.isin(codes, r) if indexer is None: return new_indexer indexer = indexer.copy() indexer[indexer] = new_indexer return indexer if isinstance(key, slice): # handle a slice, returning a slice if we can # otherwise a boolean indexer step = key.step is_negative_step = step is not None and step < 0 try: if key.start is not None: start = level_index.get_loc(key.start) elif is_negative_step: start = len(level_index) - 1 else: start = 0 if key.stop is not None: stop = level_index.get_loc(key.stop) elif is_negative_step: stop = 0 elif isinstance(start, slice): stop = len(level_index) else: stop = len(level_index) - 1 except KeyError: # we have a partial slice (like looking up a partial date # string) start = stop = level_index.slice_indexer(key.start, key.stop, key.step) step = start.step if isinstance(start, slice) or isinstance(stop, slice): # we have a slice for start and/or stop # a partial date slicer on a DatetimeIndex generates a slice # note that the stop ALREADY includes the stopped point (if # it was a string sliced) start = getattr(start, "start", start) stop = getattr(stop, "stop", stop) return convert_indexer(start, stop, step) elif level > 0 or self._lexsort_depth == 0 or step is not None: # need to have like semantics here to right # searching as when we are using a slice # so adjust the stop by 1 (so we include stop) stop = (stop - 1) if is_negative_step else (stop + 1) return convert_indexer(start, stop, step) else: # sorted, so can return slice object -> view i = algos.searchsorted(level_codes, start, side="left") j = 
algos.searchsorted(level_codes, stop, side="right") return slice(i, j, step) else: idx = self._get_loc_single_level_index(level_index, key) if level > 0 or self._lexsort_depth == 0: # Desired level is not sorted if isinstance(idx, slice): # test_get_loc_partial_timestamp_multiindex locs = (level_codes >= idx.start) & (level_codes < idx.stop) return locs locs = np.array(level_codes == idx, dtype=bool, copy=False) if not locs.any(): # The label is present in self.levels[level] but unused: raise KeyError(key) return locs if isinstance(idx, slice): # e.g. test_partial_string_timestamp_multiindex start = algos.searchsorted(level_codes, idx.start, side="left") # NB: "left" here bc of slice semantics end = algos.searchsorted(level_codes, idx.stop, side="left") else: start = algos.searchsorted(level_codes, idx, side="left") end = algos.searchsorted(level_codes, idx, side="right") if start == end: # The label is present in self.levels[level] but unused: raise KeyError(key) return slice(start, end) def get_locs(self, seq): """ Get location for a sequence of labels. Parameters ---------- seq : label, slice, list, mask or a sequence of such You should use one of the above for each level. If a level should not be used, set it to ``slice(None)``. Returns ------- numpy.ndarray NumPy array of integers suitable for passing to iloc. See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_locs('b') # doctest: +SKIP array([1, 2], dtype=int64) >>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP array([1, 2], dtype=int64) >>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP array([2], dtype=int64) """ # must be lexsorted to at least as many levels true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] if true_slices and true_slices[-1] >= self._lexsort_depth: raise UnsortedIndexError( "MultiIndex slicing requires the index to be lexsorted: slicing " f"on levels {true_slices}, lexsort depth {self._lexsort_depth}" ) if any(x is Ellipsis for x in seq): raise NotImplementedError( "MultiIndex does not support indexing with Ellipsis" ) n = len(self) def _to_bool_indexer(indexer) -> npt.NDArray[np.bool_]: if isinstance(indexer, slice): new_indexer = np.zeros(n, dtype=np.bool_) new_indexer[indexer] = True return new_indexer return indexer # a bool indexer for the positions we want to take indexer: npt.NDArray[np.bool_] | None = None for i, k in enumerate(seq): lvl_indexer: npt.NDArray[np.bool_] | slice | None = None if com.is_bool_indexer(k): if len(k) != n: raise ValueError( "cannot index with a boolean indexer that " "is not the same length as the index" ) lvl_indexer = np.asarray(k) elif is_list_like(k): # a collection of labels to include from this level (these are or'd) # GH#27591 check if this is a single tuple key in the level try: lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer) except (InvalidIndexError, TypeError, KeyError) as err: # InvalidIndexError e.g. non-hashable, fall back to treating # this as a sequence of labels # KeyError it can be ambiguous if this is a label or sequence # of labels # github.com/pandas-dev/pandas/issues/39424#issuecomment-871626708 for x in k: if not is_hashable(x): # e.g. slice raise err # GH 39424: Ignore not founds # GH 42351: No longer ignore not founds & enforced in 2.0 # TODO: how to handle IntervalIndex level? 
(no test cases) item_indexer = self._get_level_indexer( x, level=i, indexer=indexer ) if lvl_indexer is None: lvl_indexer = _to_bool_indexer(item_indexer) elif isinstance(item_indexer, slice): lvl_indexer[item_indexer] = True # type: ignore[index] else: lvl_indexer |= item_indexer if lvl_indexer is None: # no matches we are done # test_loc_getitem_duplicates_multiindex_empty_indexer return np.array([], dtype=np.intp) elif com.is_null_slice(k): # empty slice if indexer is None and i == len(seq) - 1: return np.arange(n, dtype=np.intp) continue else: # a slice or a single label lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer) # update indexer lvl_indexer = _to_bool_indexer(lvl_indexer) if indexer is None: indexer = lvl_indexer else: indexer &= lvl_indexer if not np.any(indexer) and np.any(lvl_indexer): raise KeyError(seq) # empty indexer if indexer is None: return np.array([], dtype=np.intp) pos_indexer = indexer.nonzero()[0] return self._reorder_indexer(seq, pos_indexer) # -------------------------------------------------------------------- def _reorder_indexer( self, seq: tuple[Scalar | Iterable | AnyArrayLike, ...], indexer: npt.NDArray[np.intp], ) -> npt.NDArray[np.intp]: """ Reorder an indexer of a MultiIndex (self) so that the labels are in the same order as given in seq Parameters ---------- seq : label/slice/list/mask or a sequence of such indexer: a position indexer of self Returns ------- indexer : a sorted position indexer of self ordered as seq """ # check if sorting is necessary need_sort = False for i, k in enumerate(seq): if com.is_null_slice(k) or com.is_bool_indexer(k) or is_scalar(k): pass elif is_list_like(k): if len(k) <= 1: # type: ignore[arg-type] pass elif self._is_lexsorted(): # If the index is lexsorted and the list_like label # in seq are sorted then we do not need to sort k_codes = self.levels[i].get_indexer(k) k_codes = k_codes[k_codes >= 0] # Filter absent keys # True if the given codes are not ordered need_sort = (k_codes[:-1] > k_codes[1:]).any() else: need_sort = True elif isinstance(k, slice): if self._is_lexsorted(): need_sort = k.step is not None and k.step < 0 else: need_sort = True else: need_sort = True if need_sort: break if not need_sort: return indexer n = len(self) keys: tuple[np.ndarray, ...] 
= () # For each level of the sequence in seq, map the level codes with the # order they appears in a list-like sequence # This mapping is then use to reorder the indexer for i, k in enumerate(seq): if is_scalar(k): # GH#34603 we want to treat a scalar the same as an all equal list k = [k] if com.is_bool_indexer(k): new_order = np.arange(n)[indexer] elif is_list_like(k): # Generate a map with all level codes as sorted initially k = algos.unique(k) key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len( self.levels[i] ) # Set order as given in the indexer list level_indexer = self.levels[i].get_indexer(k) level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys key_order_map[level_indexer] = np.arange(len(level_indexer)) new_order = key_order_map[self.codes[i][indexer]] elif isinstance(k, slice) and k.step is not None and k.step < 0: # flip order for negative step new_order = np.arange(n)[::-1][indexer] elif isinstance(k, slice) and k.start is None and k.stop is None: # slice(None) should not determine order GH#31330 new_order = np.ones((n,), dtype=np.intp)[indexer] else: # For all other case, use the same order as the level new_order = np.arange(n)[indexer] keys = (new_order,) + keys # Find the reordering using lexsort on the keys mapping ind = np.lexsort(keys) return indexer[ind] def truncate(self, before=None, after=None) -> MultiIndex: """ Slice index between two labels / tuples, return new MultiIndex. Parameters ---------- before : label or tuple, can be partial. Default None None defaults to start. after : label or tuple, can be partial. Default None None defaults to end. Returns ------- MultiIndex The truncated MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['x', 'y', 'z']]) >>> mi MultiIndex([('a', 'x'), ('b', 'y'), ('c', 'z')], ) >>> mi.truncate(before='a', after='b') MultiIndex([('a', 'x'), ('b', 'y')], ) """ if after and before and after < before: raise ValueError("after < before") i, j = self.levels[0].slice_locs(before, after) left, right = self.slice_locs(before, after) new_levels = list(self.levels) new_levels[0] = new_levels[0][i:j] new_codes = [level_codes[left:right] for level_codes in self.codes] new_codes[0] = new_codes[0] - i return MultiIndex( levels=new_levels, codes=new_codes, names=self._names, verify_integrity=False, ) def equals(self, other: object) -> bool: """ Determines if two MultiIndex objects have the same labeling information (the levels themselves do not necessarily have to be the same) See Also -------- equal_levels """ if self.is_(other): return True if not isinstance(other, Index): return False if len(self) != len(other): return False if not isinstance(other, MultiIndex): # d-level MultiIndex can equal d-tuple Index if not self._should_compare(other): # object Index or Categorical[object] may contain tuples return False return array_equivalent(self._values, other._values) if self.nlevels != other.nlevels: return False for i in range(self.nlevels): self_codes = self.codes[i] other_codes = other.codes[i] self_mask = self_codes == -1 other_mask = other_codes == -1 if not np.array_equal(self_mask, other_mask): return False self_codes = self_codes[~self_mask] self_values = self.levels[i]._values.take(self_codes) other_codes = other_codes[~other_mask] other_values = other.levels[i]._values.take(other_codes) # since we use NaT both datetime64 and timedelta64 we can have a # situation where a level is typed say timedelta64 in self (IOW it # has other values than NaT) but types datetime64 in other 
(where # its all NaT) but these are equivalent if len(self_values) == 0 and len(other_values) == 0: continue if not isinstance(self_values, np.ndarray): # i.e. ExtensionArray if not self_values.equals(other_values): return False elif not isinstance(other_values, np.ndarray): # i.e. other is ExtensionArray if not other_values.equals(self_values): return False else: if not array_equivalent(self_values, other_values): return False return True def equal_levels(self, other: MultiIndex) -> bool: """ Return True if the levels of both MultiIndex objects are the same """ if self.nlevels != other.nlevels: return False for i in range(self.nlevels): if not self.levels[i].equals(other.levels[i]): return False return True # -------------------------------------------------------------------- # Set Methods def _union(self, other, sort) -> MultiIndex: other, result_names = self._convert_can_do_setop(other) if other.has_duplicates: # This is only necessary if other has dupes, # otherwise difference is faster result = super()._union(other, sort) if isinstance(result, MultiIndex): return result return MultiIndex.from_arrays( zip(*result), sortorder=None, names=result_names ) else: right_missing = other.difference(self, sort=False) if len(right_missing): result = self.append(right_missing) else: result = self._get_reconciled_name_object(other) if sort is not False: try: result = result.sort_values() except TypeError: if sort is True: raise warnings.warn( "The values in the array are unorderable. " "Pass `sort=False` to suppress this warning.", RuntimeWarning, stacklevel=find_stack_level(), ) return result def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return is_object_dtype(dtype) def _get_reconciled_name_object(self, other) -> MultiIndex: """ If the result of a set operation will be self, return self, unless the names change, in which case make a shallow copy of self. """ names = self._maybe_match_names(other) if self.names != names: # error: Cannot determine type of "rename" return self.rename(names) # type: ignore[has-type] return self def _maybe_match_names(self, other): """ Try to find common names to attach to the result of an operation between a and b. Return a consensus list of names if they match at least partly or list of None if they have completely different names. """ if len(self.names) != len(other.names): return [None] * len(self.names) names = [] for a_name, b_name in zip(self.names, other.names): if a_name == b_name: names.append(a_name) else: # TODO: what if they both have np.nan for their names? 
names.append(None) return names def _wrap_intersection_result(self, other, result) -> MultiIndex: _, result_names = self._convert_can_do_setop(other) return result.set_names(result_names) def _wrap_difference_result(self, other, result: MultiIndex) -> MultiIndex: _, result_names = self._convert_can_do_setop(other) if len(result) == 0: return result.remove_unused_levels().set_names(result_names) else: return result.set_names(result_names) def _convert_can_do_setop(self, other): result_names = self.names if not isinstance(other, Index): if len(other) == 0: return self[:0], self.names else: msg = "other must be a MultiIndex or a list of tuples" try: other = MultiIndex.from_tuples(other, names=self.names) except (ValueError, TypeError) as err: # ValueError raised by tuples_to_object_array if we # have non-object dtype raise TypeError(msg) from err else: result_names = get_unanimous_names(self, other) return other, result_names # -------------------------------------------------------------------- def astype(self, dtype, copy: bool = True): dtype = pandas_dtype(dtype) if is_categorical_dtype(dtype): msg = "> 1 ndim Categorical are not supported at this time" raise NotImplementedError(msg) if not is_object_dtype(dtype): raise TypeError( "Setting a MultiIndex dtype to anything other than object " "is not supported" ) if copy is True: return self._view() return self def _validate_fill_value(self, item): if isinstance(item, MultiIndex): # GH#43212 if item.nlevels != self.nlevels: raise ValueError("Item must have length equal to number of levels.") return item._values elif not isinstance(item, tuple): # Pad the key with empty strings if lower levels of the key # aren't specified: item = (item,) + ("",) * (self.nlevels - 1) elif len(item) != self.nlevels: raise ValueError("Item must have length equal to number of levels.") return item def putmask(self, mask, value: MultiIndex) -> MultiIndex: """ Return a new MultiIndex of the values set with the mask. 
Parameters ---------- mask : array like value : MultiIndex Must either be the same length as self or length one Returns ------- MultiIndex """ mask, noop = validate_putmask(self, mask) if noop: return self.copy() if len(mask) == len(value): subset = value[mask].remove_unused_levels() else: subset = value.remove_unused_levels() new_levels = [] new_codes = [] for i, (value_level, level, level_codes) in enumerate( zip(subset.levels, self.levels, self.codes) ): new_level = level.union(value_level, sort=False) value_codes = new_level.get_indexer_for(subset.get_level_values(i)) new_code = ensure_int64(level_codes) new_code[mask] = value_codes new_levels.append(new_level) new_codes.append(new_code) return MultiIndex( levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False ) def insert(self, loc: int, item) -> MultiIndex: """ Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index """ item = self._validate_fill_value(item) new_levels = [] new_codes = [] for k, level, level_codes in zip(item, self.levels, self.codes): if k not in level: # have to insert into level # must insert at end otherwise you have to recompute all the # other codes lev_loc = len(level) level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) new_levels.append(level) new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc)) return MultiIndex( levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False ) def delete(self, loc) -> MultiIndex: """ Make new index with passed location deleted Returns ------- new_index : MultiIndex """ new_codes = [np.delete(level_codes, loc) for level_codes in self.codes] return MultiIndex( levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False, ) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: if level is None: if len(values) == 0: return np.zeros((len(self),), dtype=np.bool_) if not isinstance(values, MultiIndex): values = MultiIndex.from_tuples(values) return values.unique().get_indexer_for(self) != -1 else: num = self._get_level_number(level) levs = self.get_level_values(num) if levs.size == 0: return np.zeros(len(levs), dtype=np.bool_) return levs.isin(values) # error: Incompatible types in assignment (expression has type overloaded function, # base class "Index" defined the type as "Callable[[Index, Any, bool], Any]") rename = Index.set_names # type: ignore[assignment] # --------------------------------------------------------------- # Arithmetic/Numeric Methods - Disabled __add__ = make_invalid_op("__add__") __radd__ = make_invalid_op("__radd__") __iadd__ = make_invalid_op("__iadd__") __sub__ = make_invalid_op("__sub__") __rsub__ = make_invalid_op("__rsub__") __isub__ = make_invalid_op("__isub__") __pow__ = make_invalid_op("__pow__") __rpow__ = make_invalid_op("__rpow__") __mul__ = make_invalid_op("__mul__") __rmul__ = make_invalid_op("__rmul__") __floordiv__ = make_invalid_op("__floordiv__") __rfloordiv__ = make_invalid_op("__rfloordiv__") __truediv__ = make_invalid_op("__truediv__") __rtruediv__ = make_invalid_op("__rtruediv__") __mod__ = make_invalid_op("__mod__") __rmod__ = make_invalid_op("__rmod__") __divmod__ = make_invalid_op("__divmod__") __rdivmod__ = make_invalid_op("__rdivmod__") # Unary methods disabled __neg__ = make_invalid_op("__neg__") __pos__ = make_invalid_op("__pos__") __abs__ = make_invalid_op("__abs__") __invert__ = make_invalid_op("__invert__") The 
provided code snippet includes necessary dependencies for implementing the `ensure_index_from_sequences` function. Write a Python function `def ensure_index_from_sequences(sequences, names=None) -> Index` to solve the following problem: Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 3]], names=["name"]) Index([1, 2, 3], dtype='int64', name='name') >>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"]) MultiIndex([('a', 'a'), ('a', 'b')], names=['L1', 'L2']) See Also -------- ensure_index Here is the function: def ensure_index_from_sequences(sequences, names=None) -> Index: """ Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 3]], names=["name"]) Index([1, 2, 3], dtype='int64', name='name') >>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"]) MultiIndex([('a', 'a'), ('a', 'b')], names=['L1', 'L2']) See Also -------- ensure_index """ from pandas.core.indexes.multi import MultiIndex if len(sequences) == 1: if names is not None: names = names[0] return Index(sequences[0], name=names) else: return MultiIndex.from_arrays(sequences, names=names)
Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 3]], names=["name"]) Index([1, 2, 3], dtype='int64', name='name') >>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"]) MultiIndex([('a', 'a'), ('a', 'b')], names=['L1', 'L2']) See Also -------- ensure_index
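A short usage sketch of the dispatch described in this row: one sequence yields a flat Index, several yield a MultiIndex. Note this is illustrative only — ensure_index_from_sequences is a private pandas helper, and the import path below (pandas.core.indexes.api) is internal and may change between versions. # Illustrative only: private helper, internal import path. from pandas.core.indexes.api import ensure_index_from_sequences # One sequence -> flat Index; names=["name"] collapses to name="name". idx = ensure_index_from_sequences([[1, 2, 3]], names=["name"]) print(idx) # Index([1, 2, 3], dtype='int64', name='name') # Several sequences -> MultiIndex via MultiIndex.from_arrays. mi = ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"]) print(mi) # MultiIndex([('a', 'a'), ('a', 'b')], names=['L1', 'L2'])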
173,176
from __future__ import annotations from datetime import datetime import functools from itertools import zip_longest import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterable, Literal, NoReturn, Sequence, TypeVar, cast, final, overload, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, algos as libalgos, index as libindex, lib, ) from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import ( is_datetime_array, no_default, ) from pandas._libs.missing import is_float_nan from pandas._libs.tslibs import ( IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare, ) from pandas._typing import ( AnyAll, ArrayLike, Axes, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, JoinHow, Level, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( DuplicateLabelError, InvalidIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import ( find_stack_level, rewrite_exception, ) from pandas.core.dtypes.astype import ( astype_array, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import ( array_equivalent, is_valid_na_for_dtype, isna, ) from pandas.core import ( arraylike, ops, ) from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( setitem_datetimelike_compat, validate_putmask, ) from pandas.core.arrays import ( ArrowExtensionArray, BaseMaskedArray, Categorical, ExtensionArray, ) from pandas.core.arrays.string_ import StringArray from pandas.core.base import ( IndexOpsMixin, PandasObject, ) import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import disallow_ndim_indexing from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import ( PrettyDict, default_pprint, format_object_summary, pprint_thing, ) The provided code snippet includes necessary dependencies for implementing the `ensure_has_len` function. 
Write a Python function `def ensure_has_len(seq)` to solve the following problem: If seq is an iterator, put its values into a list. Here is the function: def ensure_has_len(seq): """ If seq is an iterator, put its values into a list. """ try: len(seq) except TypeError: return list(seq) else: return seq
If seq is an iterator, put its values into a list.
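A minimal self-contained sketch of this row's behavior (the function body is restated from the prompt above so the example runs standalone): anything without __len__, such as a generator, is drained into a list, while sized containers are returned unchanged, with no copy. def ensure_has_len(seq): # Sized containers (list, tuple, ndarray, Index) pass through untouched; # anything without __len__ (e.g. a generator) is drained into a list. try: len(seq) except TypeError: return list(seq) else: return seq print(ensure_has_len(x * x for x in range(3))) # [0, 1, 4] lst = [1, 2, 3] print(ensure_has_len(lst) is lst) # True -- same object, no copy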
173,177
from __future__ import annotations from datetime import datetime import functools from itertools import zip_longest import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterable, Literal, NoReturn, Sequence, TypeVar, cast, final, overload, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, algos as libalgos, index as libindex, lib, ) from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import ( is_datetime_array, no_default, ) from pandas._libs.missing import is_float_nan from pandas._libs.tslibs import ( IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare, ) from pandas._typing import ( AnyAll, ArrayLike, Axes, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, JoinHow, Level, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( DuplicateLabelError, InvalidIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import ( find_stack_level, rewrite_exception, ) from pandas.core.dtypes.astype import ( astype_array, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import ( array_equivalent, is_valid_na_for_dtype, isna, ) from pandas.core import ( arraylike, ops, ) from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( setitem_datetimelike_compat, validate_putmask, ) from pandas.core.arrays import ( ArrowExtensionArray, BaseMaskedArray, Categorical, ExtensionArray, ) from pandas.core.arrays.string_ import StringArray from pandas.core.base import ( IndexOpsMixin, PandasObject, ) import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import disallow_ndim_indexing from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import ( PrettyDict, default_pprint, format_object_summary, pprint_thing, ) The provided code snippet includes necessary dependencies for implementing the `trim_front` function. 
Write a Python function `def trim_front(strings: list[str]) -> list[str]` to solve the following problem: Trims leading spaces common to all strings. Examples -------- >>> trim_front([" a", " b"]) ['a', 'b'] >>> trim_front([" a", " "]) ['a', ''] Here is the function: def trim_front(strings: list[str]) -> list[str]: """ Trims leading spaces common to all strings. Examples -------- >>> trim_front([" a", " b"]) ['a', 'b'] >>> trim_front([" a", " "]) ['a', ''] """ if not strings: return strings while all(strings) and all(x[0] == " " for x in strings): strings = [x[1:] for x in strings] return strings
Trims leading spaces common to all strings. Examples -------- >>> trim_front([" a", " b"]) ['a', 'b'] >>> trim_front([" a", " "]) ['a', '']
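A self-contained demonstration of what this row's function actually does (body restated from the prompt above): it strips one leading space per pass from every string, stopping as soon as any string is empty or no longer starts with a space. def trim_front(strings): # Remove one shared leading space per pass; stop once any string is # empty or does not start with a space. if not strings: return strings while all(strings) and all(x[0] == " " for x in strings): strings = [x[1:] for x in strings] return strings print(trim_front(["  a", "  b"]))  # ['a', 'b'] -- both shared spaces removed print(trim_front([" a", "b"]))     # [' a', 'b'] -- 'b' blocks any trimming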
173,178
from __future__ import annotations from datetime import datetime import functools from itertools import zip_longest import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterable, Literal, NoReturn, Sequence, TypeVar, cast, final, overload, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, algos as libalgos, index as libindex, lib, ) from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import ( is_datetime_array, no_default, ) from pandas._libs.missing import is_float_nan from pandas._libs.tslibs import ( IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare, ) from pandas._typing import ( AnyAll, ArrayLike, Axes, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, JoinHow, Level, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( DuplicateLabelError, InvalidIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import ( find_stack_level, rewrite_exception, ) from pandas.core.dtypes.astype import ( astype_array, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import ( array_equivalent, is_valid_na_for_dtype, isna, ) from pandas.core import ( arraylike, ops, ) from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( setitem_datetimelike_compat, validate_putmask, ) from pandas.core.arrays import ( ArrowExtensionArray, BaseMaskedArray, Categorical, ExtensionArray, ) from pandas.core.arrays.string_ import StringArray from pandas.core.base import ( IndexOpsMixin, PandasObject, ) import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import disallow_ndim_indexing from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import ( PrettyDict, default_pprint, format_object_summary, pprint_thing, ) def _validate_join_method(method: str) -> None: if method not in ["left", "right", "inner", "outer"]: raise ValueError(f"do 
not recognize join method {method}")
null
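Since this row's docstring is null, a brief self-contained sketch of the validator's contract (body restated from the prompt above) may help: only the four join types accepted by Index.join pass; anything else raises ValueError. The "cross" input is merely an illustrative unsupported value. def _validate_join_method(method: str) -> None: # Only the four join types supported by Index.join are accepted. if method not in ["left", "right", "inner", "outer"]: raise ValueError(f"do not recognize join method {method}") _validate_join_method("inner") # returns None, no error try: _validate_join_method("cross") # hypothetical unsupported value except ValueError as err: print(err) # do not recognize join method cross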
173,179
from __future__ import annotations from datetime import datetime import functools from itertools import zip_longest import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterable, Literal, NoReturn, Sequence, TypeVar, cast, final, overload, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, algos as libalgos, index as libindex, lib, ) from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import ( is_datetime_array, no_default, ) from pandas._libs.missing import is_float_nan from pandas._libs.tslibs import ( IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare, ) from pandas._typing import ( AnyAll, ArrayLike, Axes, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, JoinHow, Level, Shape, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( DuplicateLabelError, InvalidIndexError, ) from pandas.util._decorators import ( Appender, cache_readonly, doc, ) from pandas.util._exceptions import ( find_stack_level, rewrite_exception, ) from pandas.core.dtypes.astype import ( astype_array, astype_is_view, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex, ) from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import ( array_equivalent, is_valid_na_for_dtype, isna, ) from pandas.core import ( arraylike, ops, ) from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import ( setitem_datetimelike_compat, validate_putmask, ) from pandas.core.arrays import ( ArrowExtensionArray, BaseMaskedArray, Categorical, ExtensionArray, ) from pandas.core.arrays.string_ import StringArray from pandas.core.base import ( IndexOpsMixin, PandasObject, ) import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import disallow_ndim_indexing from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import ( PrettyDict, default_pprint, format_object_summary, pprint_thing, ) class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. 
The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] 
| tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. _no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. 
'0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. 
# See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. """ result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. 
""" result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. """ self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. 
test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. """ return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. 
new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. """ def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. """ name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). 
""" attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. """ header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. 
Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name are reused. >>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead."
) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. 
See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. >>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. 
For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. """ if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex. Parameters ---------- ascending : bool, default True False to sort in descending order. level, sort_remaining are compat parameters and are ignored here. Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or " "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level. If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name.
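Examples
--------
A minimal illustration of this private helper (values chosen arbitrarily):

>>> mi = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]], names=["x", "y"])
>>> mi._drop_level_numbers([0])
Index(['a', 'b'], dtype='object', name='y')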
""" if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." ) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. 
Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.is_unique False >>> idx = pd.Index([1, 5, 7]) >>> idx.is_unique True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique False >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique True """ return self._engine.is_unique def has_duplicates(self) -> bool: """ Check if the Index has duplicate values. Returns ------- bool Whether or not the Index has duplicate values. See Also -------- Index.is_unique : Inverse method that checks if it has unique values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.has_duplicates True >>> idx = pd.Index([1, 5, 7]) >>> idx.has_duplicates False >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates True >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates False """ return not self.is_unique def is_boolean(self) -> bool: """ Check if the Index only consists of booleans. .. deprecated:: 2.0.0 Use `pandas.api.types.is_bool_dtype` instead. Returns ------- bool Whether or not the Index only consists of booleans. See Also -------- is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([True, False, True]) >>> idx.is_boolean() # doctest: +SKIP True >>> idx = pd.Index(["True", "False", "True"]) >>> idx.is_boolean() # doctest: +SKIP False >>> idx = pd.Index([True, False, "True"]) >>> idx.is_boolean() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_boolean is deprecated. " "Use pandas.api.types.is_bool_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["boolean"] def is_integer(self) -> bool: """ Check if the Index only consists of integers. .. deprecated:: 2.0.0 Use `pandas.api.types.is_integer_dtype` instead. Returns ------- bool Whether or not the Index only consists of integers. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_integer() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_integer() # doctest: +SKIP False >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_integer() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_integer is deprecated. " "Use pandas.api.types.is_integer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer"] def is_floating(self) -> bool: """ Check if the Index is a floating type. ..
deprecated:: 2.0.0 Use `pandas.api.types.is_float_dtype` instead. The Index may consist of only floats, NaNs, or a mix of floats, integers, or NaNs. Returns ------- bool Whether or not the Index only consists of floats, NaNs, or a mix of floats, integers, or NaNs. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4, np.nan]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_floating() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_floating is deprecated. " "Use pandas.api.types.is_float_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"] def is_numeric(self) -> bool: """ Check if the Index only consists of numeric data. .. deprecated:: 2.0.0 Use `pandas.api.types.is_numeric_dtype` instead. Returns ------- bool Whether or not the Index only consists of numeric data. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"]) >>> idx.is_numeric() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_numeric is deprecated. " "Use pandas.api.types.is_any_real_numeric_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer", "floating"] def is_object(self) -> bool: """ Check if the Index is of the object dtype. .. deprecated:: 2.0.0 Use `pandas.api.types.is_object_dtype` instead. Returns ------- bool Whether or not the Index is of the object dtype. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated).
Examples -------- >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Apple", "Mango", 2.0]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. 
" "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. """ if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. 
Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna : Inverse of notna. notna : Top-level notna. Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-like. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care about metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance; this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates.
Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate a pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as duplicated. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set to False and all others to True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep to ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self.
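Examples
--------
A minimal illustration (the names differ, so the result is a renamed copy
with ``name=None``):

>>> left = pd.Index([1, 2, 3], name="a")
>>> right = pd.Index([1, 2, 3], name="b")
>>> left._get_reconciled_name_object(right).name is None
True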
""" name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. """ # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." 
) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. 
* None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
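Examples
--------
A minimal illustration (the unique values of ``self`` that also occur in
``other``; values chosen arbitrarily):

>>> left = pd.Index([1, 2, 3, 2])
>>> right = pd.Index([2, 3, 4])
>>> left._intersection_via_get_indexer(right, sort=False)
array([2, 3])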
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
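        # --- editorial example (not part of the pandas source; a minimal sketch) ---
        # Illustrates the join/indexer contract documented above, assuming a
        # recent pandas release.  Exact dtype reprs may differ between versions.
        #
        # >>> import pandas as pd
        # >>> left = pd.Index([1, 2, 3])
        # >>> right = pd.Index([2, 3, 4])
        # >>> left.join(right, how="inner", return_indexers=True)
        # (Index([2, 3], dtype='int64'), array([1, 2]), array([0, 1]))
        # ---------------------------------------------------------------------------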
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
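        # --- editorial sketch (not part of the pandas source) ---
        # The "reverse indexer" idea used above, redone with plain NumPy for
        # illustration only; this is a hypothetical standalone version, not
        # pandas' lib.get_reverse_indexer itself.  Given an indexer mapping
        # new-level positions to old-level positions (-1 meaning "dropped"),
        # build the inverse map so old codes can be translated into new codes.
        #
        # >>> import numpy as np
        # >>> indexer = np.array([2, 0, -1])   # new pos -> old pos
        # >>> rev = np.full(4, -1)             # 4 == len(old_level)
        # >>> keep = indexer != -1
        # >>> rev[indexer[keep]] = np.flatnonzero(keep)
        # >>> rev                              # old pos -> new pos
        # array([ 1, -1,  0, -1])
        # --------------------------------------------------------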
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
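        # --- editorial example (not part of the pandas source; a minimal sketch) ---
        # Illustrates the boolean-mask and slice paths handled above, assuming a
        # recent pandas release; dtype reprs may differ between versions.
        #
        # >>> import pandas as pd
        # >>> idx = pd.Index(["a", "b", "c"])
        # >>> idx[[True, False, True]]          # list-of-bools is accepted
        # Index(['a', 'c'], dtype='object')
        # >>> idx[1:]                           # slices take the fast path
        # Index(['b', 'c'], dtype='object')
        # ---------------------------------------------------------------------------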
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- np.ndarray[np.intp] Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ # This works for either ndarray or EA, is overridden # by RangeIndex, MultIIndex return self._data.argsort(*args, **kwargs) def _check_indexing_error(self, key): if not is_scalar(key): # if key is not a scalar, directly raise an error (the code below # would convert to numpy arrays and raise later any way) - GH29926 raise InvalidIndexError(key) def _should_fallback_to_positional(self) -> bool: """ Should an integer key be treated as positional? """ return self.inferred_type not in { "integer", "mixed-integer", "floating", "complex", } _index_shared_docs[ "get_indexer_non_unique" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s Returns ------- indexer : np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. missing : np.ndarray[np.intp] An indexer into the target of the values not found. These correspond to the -1 in the indexer array. Examples -------- >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['b', 'b']) (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64)) In the example below there are no matched values. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['q', 'r', 't']) (array([-1, -1, -1]), array([0, 1, 2])) For this reason, the returned ``indexer`` contains only integers equal to -1. It demonstrates that there's no match between the index and the ``target`` values at these positions. The mask [0, 1, 2] in the return value shows that the first, second, and third elements are missing. Notice that the return value is a tuple contains two items. In the example below the first item is an array of locations in ``index``. The second item is a mask shows that the first and third elements are missing. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['f', 'b', 's']) (array([-1, 1, 3, 4, -1]), array([0, 2])) """ def get_indexer_non_unique( self, target ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) target = self._maybe_cast_listlike_indexer(target) if not self._should_compare(target) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. return self._get_indexer_non_comparable(target, method=None, unique=False) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer_non_unique(ptarget) if not is_dtype_equal(self.dtype, target.dtype): # TODO: if object, could use infer_dtype to preempt costly # conversion if still non-comparable? 
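        # --- editorial example (not part of the pandas source; a minimal sketch) ---
        # Complements the shared docstring above by unpacking the two arrays
        # returned by get_indexer_non_unique on a small non-unique index.  Exact
        # integer dtypes are platform dependent.
        #
        # >>> import pandas as pd
        # >>> idx = pd.Index(["b", "b", "c"])
        # >>> indexer, missing = idx.get_indexer_non_unique(["b", "x"])
        # >>> indexer                # positions in idx; -1 where not found
        # array([ 0,  1, -1])
        # >>> missing                # positions in the target that were not found
        # array([1])
        # ---------------------------------------------------------------------------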
            dtype = self._find_common_type_compat(target)
            this = self.astype(dtype, copy=False)
            that = target.astype(dtype, copy=False)
            return this.get_indexer_non_unique(that)

        # TODO: get_indexer has fastpaths for both Categorical-self and
        #  Categorical-target. Can we do something similar here?

        # Note: _maybe_promote ensures we never get here with MultiIndex
        #  self and non-Multi target
        tgt_values = target._get_engine_target()
        if self._is_multi and target._is_multi:
            engine = self._engine
            # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has
            # no attribute "_extract_level_codes"
            tgt_values = engine._extract_level_codes(target)  # type: ignore[union-attr]

        indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
        return ensure_platform_int(indexer), ensure_platform_int(missing)

    def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
        """
        Guaranteed return of an indexer even when non-unique.

        This dispatches to get_indexer or get_indexer_non_unique
        as appropriate.

        Returns
        -------
        np.ndarray[np.intp]
            List of indices.

        Examples
        --------
        >>> idx = pd.Index([np.nan, 'var1', np.nan])
        >>> idx.get_indexer_for([np.nan])
        array([0, 2])
        """
        if self._index_as_unique:
            return self.get_indexer(target)
        indexer, _ = self.get_indexer_non_unique(target)
        return indexer

    def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:
        """
        Analogue to get_indexer that raises if any elements are missing.
        """
        keyarr = key
        if not isinstance(keyarr, Index):
            keyarr = com.asarray_tuplesafe(keyarr)

        if self._index_as_unique:
            indexer = self.get_indexer_for(keyarr)
            keyarr = self.reindex(keyarr)[0]
        else:
            keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)

        self._raise_if_missing(keyarr, indexer, axis_name)

        keyarr = self.take(indexer)
        if isinstance(key, Index):
            # GH 42790 - Preserve name from an Index
            keyarr.name = key.name
        if keyarr.dtype.kind in ["m", "M"]:
            # DTI/TDI.take can infer a freq in some cases when we don't want one
            if isinstance(key, list) or (
                isinstance(key, type(self))
                # "Index" has no attribute "freq"
                and key.freq is None  # type: ignore[attr-defined]
            ):
                keyarr = keyarr._with_freq(None)

        return keyarr, indexer

    def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
        """
        Check that indexer can be used to return a result.

        e.g. at least one element was found,
        unless the list of keys was actually empty.

        Parameters
        ----------
        key : list-like
            Targeted labels (only used to show correct error message).
        indexer : array-like of ints
            Indices corresponding to the key (with -1 indicating not found).
        axis_name : str

        Raises
        ------
        KeyError
            If at least one key was requested but none was found.
        """
        if len(key) == 0:
            return

        # Count missing values
        missing_mask = indexer < 0
        nmissing = missing_mask.sum()

        if nmissing:
            # TODO: remove special-case; this is just to keep exception
            #  message tests from raising while debugging
            use_interval_msg = is_interval_dtype(self.dtype) or (
                is_categorical_dtype(self.dtype)
                # "Index" has no attribute "categories"  [attr-defined]
                and is_interval_dtype(
                    self.categories.dtype  # type: ignore[attr-defined]
                )
            )

            if nmissing == len(indexer):
                if use_interval_msg:
                    key = list(key)
                raise KeyError(f"None of [{key}] are in the [{axis_name}]")

            not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
            raise KeyError(f"{not_found} not in index")

    @overload
    def _get_indexer_non_comparable(
        self, target: Index, method, unique: Literal[True] = ...
    ) -> npt.NDArray[np.intp]:
        ...
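    # Added note (illustration, not from the original source): for the
    # non-comparable path implemented below, an equality lookup just reports
    # no matches, e.g. int64 labels vs. a datetime64 target (hypothetical
    # session):
    #   >>> pd.Index([1, 2]).get_indexer(pd.date_range("2020", periods=2))
    #   array([-1, -1])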
    @overload
    def _get_indexer_non_comparable(
        self, target: Index, method, unique: Literal[False]
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        ...

    @overload
    def _get_indexer_non_comparable(
        self, target: Index, method, unique: bool = True
    ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        ...

    def _get_indexer_non_comparable(
        self, target: Index, method, unique: bool = True
    ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        """
        Called from get_indexer or get_indexer_non_unique when the target
        is of a non-comparable dtype.

        For get_indexer lookups with method=None, get_indexer is an _equality_
        check, so non-comparable dtypes mean we will always have no matches.

        For get_indexer lookups with a method, get_indexer is an _inequality_
        check, so non-comparable dtypes mean we will always raise TypeError.

        Parameters
        ----------
        target : Index
        method : str or None
        unique : bool, default True
            * True if called from get_indexer.
            * False if called from get_indexer_non_unique.

        Raises
        ------
        TypeError
            If doing an inequality check, i.e. method is not None.
        """
        if method is not None:
            other = _unpack_nested_dtype(target)
            raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}")

        no_matches = -1 * np.ones(target.shape, dtype=np.intp)
        if unique:
            # This is for get_indexer
            return no_matches
        else:
            # This is for get_indexer_non_unique
            missing = np.arange(len(target), dtype=np.intp)
            return no_matches, missing

    @cache_readonly
    def _index_as_unique(self) -> bool:
        """
        Whether we should treat this as unique for the sake of
        get_indexer vs get_indexer_non_unique.

        For IntervalIndex compat.
        """
        return self.is_unique

    _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects"

    def _maybe_promote(self, other: Index) -> tuple[Index, Index]:
        """
        When dealing with an object-dtype Index and a non-object Index, see
        if we can upcast the object-dtype one to improve performance.
        """
        if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
            if (
                self.tz is not None
                and other.tz is not None
                and not tz_compare(self.tz, other.tz)
            ):
                # standardize on UTC
                return self.tz_convert("UTC"), other.tz_convert("UTC")

        elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex):
            try:
                return type(other)(self), other
            except OutOfBoundsDatetime:
                return self, other
        elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex):
            # TODO: we don't have tests that get here
            return type(other)(self), other

        elif self.dtype.kind == "u" and other.dtype.kind == "i":
            # GH#41873
            if other.min() >= 0:
                # lookup min as it may be cached
                # TODO: may need itemsize check if we have non-64-bit Indexes
                return self, other.astype(self.dtype)

        elif self._is_multi and not other._is_multi:
            try:
                # "Type[Index]" has no attribute "from_tuples"
                other = type(self).from_tuples(other)  # type: ignore[attr-defined]
            except (TypeError, ValueError):
                # let's instead try with a straight Index
                self = Index(self._values)

        if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):
            # Reverse op so we don't need to re-implement on the subclasses
            other, self = other._maybe_promote(self)

        return self, other

    def _find_common_type_compat(self, target) -> DtypeObj:
        """
        Implementation of find_common_type that adjusts for Index-specific
        special cases.
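
        For illustration (added; not in the original docstring): under the
        uint64/signed-int special case handled below, uint64 combined with
        int64 falls back to object dtype, while float and [u]int combine to
        a float dtype.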
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
        same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type
        if same_dtype:
            new_values = maybe_cast_pointwise_result(
                new_values, self.dtype, same_dtype=same_dtype
            )

        return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name)

    # TODO: De-duplicate with map, xref GH#32349
    def _transform_index(self, func, *, level=None) -> Index:
        """
        Apply function to all values found in index.

        This includes transforming multiindex entries separately.
        Only apply function to one level of the MultiIndex if level is specified.
        """
        if isinstance(self, ABCMultiIndex):
            values = [
                self.get_level_values(i).map(func)
                if i == level or level is None
                else self.get_level_values(i)
                for i in range(self.nlevels)
            ]
            return type(self).from_arrays(values)
        else:
            items = [func(x) for x in self]
            return Index(items, name=self.name, tupleize_cols=False)

    def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
        """
        Return a boolean array where the index values are in `values`.

        Compute boolean array of whether each index value is found in the
        passed set of values. The length of the returned boolean array matches
        the length of the index.

        Parameters
        ----------
        values : set or list-like
            Sought values.
        level : str or int, optional
            Name or position of the index level to use (if the index is a
            `MultiIndex`).

        Returns
        -------
        np.ndarray[bool]
            NumPy array of boolean values.

        See Also
        --------
        Series.isin : Same for Series.
        DataFrame.isin : Same method for DataFrames.

        Notes
        -----
        In the case of `MultiIndex` you must either specify `values` as a
        list-like object containing tuples that are the same length as the
        number of levels, or specify `level`. Otherwise it will raise a
        ``ValueError``.

        If `level` is specified:

        - if it is the name of one *and only one* index level, use that level;
        - otherwise it should be a number indicating level position.

        Examples
        --------
        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')

        Check whether each index value is in a list of values.

        >>> idx.isin([1, 4])
        array([ True, False, False])

        >>> midx = pd.MultiIndex.from_arrays([[1, 2, 3],
        ...                                   ['red', 'blue', 'green']],
        ...                                  names=('number', 'color'))
        >>> midx
        MultiIndex([(1,   'red'),
                    (2,  'blue'),
                    (3, 'green')],
                   names=['number', 'color'])

        Check whether the strings in the 'color' level of the MultiIndex
        are in a list of colors.

        >>> midx.isin(['red', 'orange', 'yellow'], level='color')
        array([ True, False, False])

        To check across the levels of a MultiIndex, pass a list of tuples:

        >>> midx.isin([(1, 'red'), (3, 'red')])
        array([ True, False, False])

        For a DatetimeIndex, string values in `values` are converted to
        Timestamps.

        >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
        >>> dti = pd.to_datetime(dates)
        >>> dti
        DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
                      dtype='datetime64[ns]', freq=None)

        >>> dti.isin(['2000-03-11'])
        array([ True, False, False])
        """
        if level is not None:
            self._validate_index_level(level)
        return algos.isin(self._values, values)

    def _get_string_slice(self, key: str_t):
        # this is for partial string indexing,
        # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
        raise NotImplementedError

    def slice_indexer(
        self,
        start: Hashable | None = None,
        end: Hashable | None = None,
        step: int | None = None,
    ) -> slice:
        """
        Compute the slice indexer for input labels and step.

        Index needs to be ordered and unique.

        Parameters
        ----------
        start : label, default None
            If None, defaults to the beginning.
        end : label, default None
            If None, defaults to the end.
        step : int, default None

        Returns
        -------
        slice

        Raises
        ------
        KeyError : If key does not exist, or key is not unique and index is
            not ordered.

        Notes
        -----
        This function assumes that the data is sorted, so use at your own peril.

        Examples
        --------
        This is a method on all index types. For example you can do:

        >>> idx = pd.Index(list('abcd'))
        >>> idx.slice_indexer(start='b', end='c')
        slice(1, 3, None)

        >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
        >>> idx.slice_indexer(start='b', end=('c', 'g'))
        slice(1, 3, None)
        """
        start_slice, end_slice = self.slice_locs(start, end, step=step)

        # return a slice
        if not is_scalar(start_slice):
            raise AssertionError("Start slice bound is non-scalar")
        if not is_scalar(end_slice):
            raise AssertionError("End slice bound is non-scalar")

        return slice(start_slice, end_slice, step)

    def _maybe_cast_indexer(self, key):
        """
        If we have a float key and are not a floating index, then try to cast
        to an int if equivalent.
        """
        return key

    def _maybe_cast_listlike_indexer(self, target) -> Index:
        """
        Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
        """
        return ensure_index(target)

    def _validate_indexer(self, form: str_t, key, kind: str_t) -> None:
        """
        If we are a positional indexer, validate that we have appropriately
        typed bounds: the key must be an integer.
        """
        assert kind in ["getitem", "iloc"]

        if key is not None and not is_integer(key):
            self._raise_invalid_indexer(form, key)

    def _maybe_cast_slice_bound(self, label, side: str_t):
        """
        This function should be overloaded in subclasses that allow non-trivial
        casting on label-slice bounds, e.g. datetime-like indices allowing
        strings containing formatted datetimes.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """

        # We are a plain index here (subclasses override this method if they
        #  wish to have special treatment for floats/ints), e.g. datetimelike
        #  Indexes
        if is_numeric_dtype(self.dtype):
            return self._maybe_cast_indexer(label)

        # reject them, if index does not contain label
        if (is_float(label) or is_integer(label)) and label not in self:
            self._raise_invalid_indexer("slice", label)

        return label

    def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
        if self.is_monotonic_increasing:
            return self.searchsorted(label, side=side)
        elif self.is_monotonic_decreasing:
            # np.searchsorted expects ascending sort order, have to reverse
            # everything for it to work (element ordering, search side and
            # resulting value).
            pos = self[::-1].searchsorted(
                label, side="right" if side == "left" else "left"
            )
            return len(self) - pos

        raise ValueError("index must be monotonic increasing or decreasing")

    def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
        """
        Calculate slice bound that corresponds to given label.

        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
        of given label.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        int
            Index of label.
        """

        if side not in ("left", "right"):
            raise ValueError(
                "Invalid value for side kwarg, must be either "
                f"'left' or 'right': {side}"
            )

        original_label = label

        # For datetime indices label may be a string that has to be converted
        # to datetime boundary according to its resolution.
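        # Added illustration (not from the original source), using the plain
        # Index path (hypothetical session):
        #   >>> pd.Index(list("abcd")).get_slice_bound("b", side="left")
        #   1
        #   >>> pd.Index(list("abcd")).get_slice_bound("b", side="right")
        #   2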
        label = self._maybe_cast_slice_bound(label, side)

        # we need to look up the label
        try:
            slc = self.get_loc(label)
        except KeyError as err:
            try:
                return self._searchsorted_monotonic(label, side)
            except ValueError:
                # raise the original KeyError
                raise err

        if isinstance(slc, np.ndarray):
            # get_loc may return a boolean array, which
            # is OK as long as they are representable by a slice.
            assert is_bool_dtype(slc.dtype)
            slc = lib.maybe_booleans_to_slice(slc.view("u1"))
            if isinstance(slc, np.ndarray):
                raise KeyError(
                    f"Cannot get {side} slice bound for non-unique "
                    f"label: {repr(original_label)}"
                )

        if isinstance(slc, slice):
            if side == "left":
                return slc.start
            else:
                return slc.stop
        else:
            if side == "right":
                return slc + 1
            else:
                return slc

    def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]:
        """
        Compute slice locations for input labels.

        Parameters
        ----------
        start : label, default None
            If None, defaults to the beginning.
        end : label, default None
            If None, defaults to the end.
        step : int, default None
            If None, defaults to 1.

        Returns
        -------
        tuple[int, int]

        See Also
        --------
        Index.get_loc : Get location for a single label.

        Notes
        -----
        This method only works if the index is monotonic or unique.

        Examples
        --------
        >>> idx = pd.Index(list('abcd'))
        >>> idx.slice_locs(start='b', end='c')
        (1, 3)
        """
        inc = step is None or step >= 0

        if not inc:
            # If it's a reverse slice, temporarily swap bounds.
            start, end = end, start

        # GH 16785: If start and end happen to be date strings with UTC offsets
        # attempt to parse and check that the offsets are the same
        if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):
            try:
                ts_start = Timestamp(start)
                ts_end = Timestamp(end)
            except (ValueError, TypeError):
                pass
            else:
                if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
                    raise ValueError("Both dates must have the same UTC offset")

        start_slice = None
        if start is not None:
            start_slice = self.get_slice_bound(start, "left")
        if start_slice is None:
            start_slice = 0

        end_slice = None
        if end is not None:
            end_slice = self.get_slice_bound(end, "right")
        if end_slice is None:
            end_slice = len(self)

        if not inc:
            # Bounds at this moment are swapped, swap them back and shift by 1.
            #
            # slice_locs('B', 'A', step=-1): s='B', e='A'
            #
            #              s='A'                 e='B'
            # AFTER SWAP:    |                     |
            #                v ------------------> V
            #           -----------------------------------
            #           | | |A|A|A|A| | | | | |B|B| | | | |
            #           -----------------------------------
            #              ^ <------------------ ^
            # SHOULD BE:   |                     |
            #           end=s-1              start=e-1
            #
            end_slice, start_slice = start_slice - 1, end_slice - 1

            # i == -1 triggers ``len(self) + i`` selection that points to the
            # last element, not before-the-first one, subtracting len(self)
            # compensates that.
            if end_slice == -1:
                end_slice -= len(self)
            if start_slice == -1:
                start_slice -= len(self)

        return start_slice, end_slice

    def delete(self: _IndexT, loc) -> _IndexT:
        """
        Make new Index with passed location(-s) deleted.

        Parameters
        ----------
        loc : int or list of int
            Location of item(-s) which will be deleted.
            Use a list of locations to delete more than one value at the same time.

        Returns
        -------
        Index
            Will be same type as self, except for RangeIndex.

        See Also
        --------
        numpy.delete : Delete rows and columns from a NumPy array (ndarray).
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `maybe_extract_name` function. Write a Python function `def maybe_extract_name(name, obj, cls) -> Hashable` to solve the following problem: If no name is passed, then extract it from data, validating hashability. Here is the function: def maybe_extract_name(name, obj, cls) -> Hashable: """ If no name is passed, then extract it from data, validating hashability. """ if name is None and isinstance(obj, (Index, ABCSeries)): # Note we don't just check for "name" attribute since that would # pick up e.g. dtype.name name = obj.name # GH#29069 if not is_hashable(name): raise TypeError(f"{cls.__name__}.name must be a hashable type") return name
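
# End-to-end sketch of the indexer and slicing APIs documented above (added
# for illustration; not part of pandas itself).
def _demo_indexers() -> None:  # hypothetical helper
    import pandas as pd

    idx = pd.Index(["c", "b", "a", "b", "b"])
    # Non-unique index: get_indexer_for dispatches to get_indexer_non_unique.
    print(idx.get_indexer_for(["b"]))  # [1 3 4]
    print(idx.get_indexer_non_unique(["q"]))  # (array([-1]), array([0]))

    srt = pd.Index(list("abcd"))
    print(srt.slice_locs(start="b", end="c"))  # (1, 3)
    # A negative step swaps the bounds internally, per slice_locs above.
    print(srt.slice_indexer(start="c", end="b", step=-1))  # slice(2, 0, -1)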