Columns:
  id: int64 (values 0-190k)
  prompt: string (lengths 21-13.4M)
  docstring: string (lengths 1-12k)
173,280
from __future__ import annotations from datetime import ( datetime, timedelta, ) from functools import wraps import operator from typing import ( TYPE_CHECKING, Any, Callable, Iterator, Literal, Sequence, TypeVar, Union, cast, final, overload, ) import warnings import numpy as np from pandas._libs import ( algos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, IncompatibleFrequency, NaT, NaTType, Period, Resolution, Tick, Timedelta, Timestamp, astype_overflowsafe, delta_to_nanoseconds, get_unit_from_dtype, iNaT, ints_to_pydatetime, ints_to_pytimedelta, to_offset, ) from pandas._libs.tslibs.fields import ( RoundTo, round_nsint64, ) from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import ( ArrayLike, AxisInt, DatetimeLikeScalar, Dtype, DtypeObj, F, NpDtype, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, SequenceIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidComparison, PerformanceWarning, ) from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_all_strings, is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.generic import ( ABCCategorical, ABCMultiIndex, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, ) from pandas.core import ( algorithms, nanops, ops, ) from pandas.core.algorithms import ( checked_add_with_arr, isin, unique1d, ) from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import ( NDArrayBackedExtensionArray, ravel_compat, ) from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.integer import IntegerArray import pandas.core.common as com from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import ( check_array_indexer, check_setitem_lengths, ) from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import ( invalid_comparison, make_invalid_op, ) from pandas.tseries import frequencies def validate_periods(periods: None) -> None: ...
null
173,281
from __future__ import annotations from datetime import ( datetime, timedelta, ) from functools import wraps import operator from typing import ( TYPE_CHECKING, Any, Callable, Iterator, Literal, Sequence, TypeVar, Union, cast, final, overload, ) import warnings import numpy as np from pandas._libs import ( algos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, IncompatibleFrequency, NaT, NaTType, Period, Resolution, Tick, Timedelta, Timestamp, astype_overflowsafe, delta_to_nanoseconds, get_unit_from_dtype, iNaT, ints_to_pydatetime, ints_to_pytimedelta, to_offset, ) from pandas._libs.tslibs.fields import ( RoundTo, round_nsint64, ) from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import ( ArrayLike, AxisInt, DatetimeLikeScalar, Dtype, DtypeObj, F, NpDtype, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, SequenceIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidComparison, PerformanceWarning, ) from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_all_strings, is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.generic import ( ABCCategorical, ABCMultiIndex, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, ) from pandas.core import ( algorithms, nanops, ops, ) from pandas.core.algorithms import ( checked_add_with_arr, isin, unique1d, ) from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import ( NDArrayBackedExtensionArray, ravel_compat, ) from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.integer import IntegerArray import pandas.core.common as com from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import ( check_array_indexer, check_setitem_lengths, ) from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import ( invalid_comparison, make_invalid_op, ) from pandas.tseries import frequencies def validate_periods(periods: int | float) -> int: ...
null
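The two prompt rows above (173,280 and 173,281) read like the typing overload stubs that precede the full validate_periods implementation in the next row. A minimal sketch of that overload pattern, as an assumption about how the surrounding pandas source is laid out rather than something the prompts show directly:

from typing import overload

@overload
def validate_periods(periods: None) -> None:
    ...
@overload
def validate_periods(periods: int | float) -> int:
    ...
def validate_periods(periods: int | float | None) -> int | None:
    # Runtime implementation; the overloads above only inform static type checkers.
    ...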
173,282
from __future__ import annotations from datetime import ( datetime, timedelta, ) from functools import wraps import operator from typing import ( TYPE_CHECKING, Any, Callable, Iterator, Literal, Sequence, TypeVar, Union, cast, final, overload, ) import warnings import numpy as np from pandas._libs import ( algos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, IncompatibleFrequency, NaT, NaTType, Period, Resolution, Tick, Timedelta, Timestamp, astype_overflowsafe, delta_to_nanoseconds, get_unit_from_dtype, iNaT, ints_to_pydatetime, ints_to_pytimedelta, to_offset, ) from pandas._libs.tslibs.fields import ( RoundTo, round_nsint64, ) from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import ( ArrayLike, AxisInt, DatetimeLikeScalar, Dtype, DtypeObj, F, NpDtype, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, SequenceIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidComparison, PerformanceWarning, ) from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_all_strings, is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.generic import ( ABCCategorical, ABCMultiIndex, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, ) from pandas.core import ( algorithms, nanops, ops, ) from pandas.core.algorithms import ( checked_add_with_arr, isin, unique1d, ) from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import ( NDArrayBackedExtensionArray, ravel_compat, ) from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.integer import IntegerArray import pandas.core.common as com from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import ( check_array_indexer, check_setitem_lengths, ) from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import ( invalid_comparison, make_invalid_op, ) from pandas.tseries import frequencies def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... The provided code snippet includes necessary dependencies for implementing the `validate_periods` function. Write a Python function `def validate_periods(periods: int | float | None) -> int | None` to solve the following problem: If a `periods` argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. Parameters ---------- periods : None, float, int Returns ------- periods : None or int Raises ------ TypeError if periods is None, float, or int Here is the function: def validate_periods(periods: int | float | None) -> int | None: """ If a `periods` argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. 
Parameters ---------- periods : None, float, int Returns ------- periods : None or int Raises ------ TypeError if periods is None, float, or int """ if periods is not None: if lib.is_float(periods): periods = int(periods) elif not lib.is_integer(periods): raise TypeError(f"periods must be a number, got {periods}") periods = cast(int, periods) return periods
If a `periods` argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. Parameters ---------- periods : None, float, int Returns ------- periods : None or int Raises ------ TypeError if periods is None, float, or int
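A quick sketch of the expected behavior of validate_periods, using the internal module path pandas.core.arrays.datetimelike that later rows reference as dtl (an internal API, so the path may move between pandas versions):

from pandas.core.arrays.datetimelike import validate_periods

validate_periods(None)   # None is passed through unchanged
validate_periods(4.0)    # floats are cast to int: returns 4
validate_periods(4)      # integers are returned as-is: 4
validate_periods("4")    # raises TypeError: "periods must be a number, got 4"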
173,283
from __future__ import annotations from datetime import ( datetime, timedelta, ) from functools import wraps import operator from typing import ( TYPE_CHECKING, Any, Callable, Iterator, Literal, Sequence, TypeVar, Union, cast, final, overload, ) import warnings import numpy as np from pandas._libs import ( algos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, IncompatibleFrequency, NaT, NaTType, Period, Resolution, Tick, Timedelta, Timestamp, astype_overflowsafe, delta_to_nanoseconds, get_unit_from_dtype, iNaT, ints_to_pydatetime, ints_to_pytimedelta, to_offset, ) from pandas._libs.tslibs.fields import ( RoundTo, round_nsint64, ) from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import ( ArrayLike, AxisInt, DatetimeLikeScalar, Dtype, DtypeObj, F, NpDtype, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, SequenceIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidComparison, PerformanceWarning, ) from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_all_strings, is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.generic import ( ABCCategorical, ABCMultiIndex, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, ) from pandas.core import ( algorithms, nanops, ops, ) from pandas.core.algorithms import ( checked_add_with_arr, isin, unique1d, ) from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import ( NDArrayBackedExtensionArray, ravel_compat, ) from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.integer import IntegerArray import pandas.core.common as com from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import ( check_array_indexer, check_setitem_lengths, ) from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import ( invalid_comparison, make_invalid_op, ) from pandas.tseries import frequencies The provided code snippet includes necessary dependencies for implementing the `validate_inferred_freq` function. Write a Python function `def validate_inferred_freq( freq, inferred_freq, freq_infer ) -> tuple[BaseOffset | None, bool]` to solve the following problem: If the user passes a freq and another freq is inferred from passed data, require that they match. Parameters ---------- freq : DateOffset or None inferred_freq : DateOffset or None freq_infer : bool Returns ------- freq : DateOffset or None freq_infer : bool Notes ----- We assume at this point that `maybe_infer_freq` has been called, so `freq` is either a DateOffset object or None. 
Here is the function: def validate_inferred_freq( freq, inferred_freq, freq_infer ) -> tuple[BaseOffset | None, bool]: """ If the user passes a freq and another freq is inferred from passed data, require that they match. Parameters ---------- freq : DateOffset or None inferred_freq : DateOffset or None freq_infer : bool Returns ------- freq : DateOffset or None freq_infer : bool Notes ----- We assume at this point that `maybe_infer_freq` has been called, so `freq` is either a DateOffset object or None. """ if inferred_freq is not None: if freq is not None and freq != inferred_freq: raise ValueError( f"Inferred frequency {inferred_freq} from passed " "values does not conform to passed frequency " f"{freq.freqstr}" ) if freq is None: freq = inferred_freq freq_infer = False return freq, freq_infer
If the user passes a freq and another freq is inferred from passed data, require that they match. Parameters ---------- freq : DateOffset or None inferred_freq : DateOffset or None freq_infer : bool Returns ------- freq : DateOffset or None freq_infer : bool Notes ----- We assume at this point that `maybe_infer_freq` has been called, so `freq` is either a DateOffset object or None.
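A minimal sketch of how validate_inferred_freq resolves the two frequency sources; to_offset is used only to build DateOffset objects for illustration, and the internal import path is an assumption:

from pandas.tseries.frequencies import to_offset
from pandas.core.arrays.datetimelike import validate_inferred_freq

# No explicit freq: the inferred offset wins and further inference is switched off.
freq, freq_infer = validate_inferred_freq(None, to_offset("D"), True)
assert freq == to_offset("D") and freq_infer is False

# An explicit freq that contradicts the inferred one raises ValueError.
try:
    validate_inferred_freq(to_offset("H"), to_offset("D"), False)
except ValueError as exc:
    print(exc)  # explains that the inferred frequency does not conform to the passed one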
173,284
from __future__ import annotations from datetime import ( datetime, timedelta, ) from functools import wraps import operator from typing import ( TYPE_CHECKING, Any, Callable, Iterator, Literal, Sequence, TypeVar, Union, cast, final, overload, ) import warnings import numpy as np from pandas._libs import ( algos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, IncompatibleFrequency, NaT, NaTType, Period, Resolution, Tick, Timedelta, Timestamp, astype_overflowsafe, delta_to_nanoseconds, get_unit_from_dtype, iNaT, ints_to_pydatetime, ints_to_pytimedelta, to_offset, ) from pandas._libs.tslibs.fields import ( RoundTo, round_nsint64, ) from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import ( ArrayLike, AxisInt, DatetimeLikeScalar, Dtype, DtypeObj, F, NpDtype, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, SequenceIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidComparison, PerformanceWarning, ) from pandas.util._decorators import ( Appender, Substitution, cache_readonly, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_all_strings, is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.generic import ( ABCCategorical, ABCMultiIndex, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, ) from pandas.core import ( algorithms, nanops, ops, ) from pandas.core.algorithms import ( checked_add_with_arr, isin, unique1d, ) from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import ( NDArrayBackedExtensionArray, ravel_compat, ) from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.integer import IntegerArray import pandas.core.common as com from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import ( check_array_indexer, check_setitem_lengths, ) from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import ( invalid_comparison, make_invalid_op, ) from pandas.tseries import frequencies class DatetimeTZDtype(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- unit : str, default "ns" The precision of the datetime data. Currently limited to ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ pytz.UnknownTimeZoneError When the requested timezone cannot be found. Examples -------- >>> pd.DatetimeTZDtype(tz='UTC') datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz='dateutil/US/Central') datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? 
_metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def na_value(self) -> NaTType: return NaT # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." ) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) def unit(self) -> str_type: """ The precision of the datetime data. """ return self._unit def tz(self) -> tzinfo: """ The timezone. """ return self._tz def construct_array_type(cls) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. Examples -------- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d["unit"], tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # pytz timezone (actually pytz.UnknownTimeZoneError). # TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __setstate__(self, state) -> None: # for pickle compat. 
__get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] The provided code snippet includes necessary dependencies for implementing the `dtype_to_unit` function. Write a Python function `def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype) -> str` to solve the following problem: Return the unit str corresponding to the dtype's resolution. Parameters ---------- dtype : DatetimeTZDtype or np.dtype If np.dtype, we assume it is a datetime64 dtype. Returns ------- str Here is the function: def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype) -> str: """ Return the unit str corresponding to the dtype's resolution. Parameters ---------- dtype : DatetimeTZDtype or np.dtype If np.dtype, we assume it is a datetime64 dtype. Returns ------- str """ if isinstance(dtype, DatetimeTZDtype): return dtype.unit return np.datetime_data(dtype)[0]
Return the unit str corresponding to the dtype's resolution. Parameters ---------- dtype : DatetimeTZDtype or np.dtype If np.dtype, we assume it is a datetime64 dtype. Returns ------- str
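With the dtype_to_unit function above in scope, its behavior on the two supported dtype flavors looks like this (a sketch; exact reprs depend on the pandas/numpy versions):

import numpy as np
from pandas import DatetimeTZDtype

dtype_to_unit(DatetimeTZDtype(tz="UTC"))    # "ns", read from the extension dtype
dtype_to_unit(np.dtype("datetime64[ms]"))   # "ms", via np.datetime_data(dtype)[0]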
173,285
from __future__ import annotations from copy import deepcopy import operator import re from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, cast, ) import numpy as np from pandas._libs import lib from pandas._typing import ( ArrayLike, AxisInt, Dtype, FillnaOptions, Iterator, NpDtype, PositionalIndexer, Scalar, SortKind, TakeIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat import ( pa_version_under7p0, pa_version_under8p0, pa_version_under9p0, pa_version_under11p0, ) from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_integer, is_integer_dtype, is_list_like, is_object_dtype, is_scalar, ) from pandas.core.dtypes.missing import isna from pandas.core import roperator from pandas.core.arraylike import OpsMixin from pandas.core.arrays.base import ( ExtensionArray, ExtensionArraySupportsAnyAll, ) import pandas.core.common as com from pandas.core.indexers import ( check_array_indexer, unpack_tuple_and_ellipses, validate_indices, ) from pandas.core.strings.base import BaseStringArrayMethods from pandas.tseries.frequencies import to_offset def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime] def cast_for_truediv( arrow_array: pa.ChunkedArray, pa_object: pa.Array | pa.Scalar ) -> pa.ChunkedArray: # Ensure int / int -> float mirroring Python/Numpy behavior # as pc.divide_checked(int, int) -> int if pa.types.is_integer(arrow_array.type) and pa.types.is_integer( pa_object.type ): return arrow_array.cast(pa.float64()) return arrow_array
null
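A small check of cast_for_truediv's contract, with the function above in scope and pyarrow installed: integer operands are widened to float64 so that true division does not stay integral, while non-integer inputs pass through untouched.

import pyarrow as pa

ints = pa.chunked_array([pa.array([1, 2, 3], type=pa.int64())])
divisor = pa.scalar(2, type=pa.int64())

widened = cast_for_truediv(ints, divisor)
assert widened.type == pa.float64()

floats = pa.chunked_array([pa.array([1.0, 2.0], type=pa.float64())])
assert cast_for_truediv(floats, divisor).type == pa.float64()  # returned unchanged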
173,286
from __future__ import annotations from copy import deepcopy import operator import re from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, cast, ) import numpy as np from pandas._libs import lib from pandas._typing import ( ArrayLike, AxisInt, Dtype, FillnaOptions, Iterator, NpDtype, PositionalIndexer, Scalar, SortKind, TakeIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat import ( pa_version_under7p0, pa_version_under8p0, pa_version_under9p0, pa_version_under11p0, ) from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_integer, is_integer_dtype, is_list_like, is_object_dtype, is_scalar, ) from pandas.core.dtypes.missing import isna from pandas.core import roperator from pandas.core.arraylike import OpsMixin from pandas.core.arrays.base import ( ExtensionArray, ExtensionArraySupportsAnyAll, ) import pandas.core.common as com from pandas.core.indexers import ( check_array_indexer, unpack_tuple_and_ellipses, validate_indices, ) from pandas.core.strings.base import BaseStringArrayMethods from pandas.tseries.frequencies import to_offset def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime] def floordiv_compat( left: pa.ChunkedArray | pa.Array | pa.Scalar, right: pa.ChunkedArray | pa.Array | pa.Scalar, ) -> pa.ChunkedArray: # Ensure int // int -> int mirroring Python/Numpy behavior # as pc.floor(pc.divide_checked(int, int)) -> float result = pc.floor(pc.divide(left, right)) if pa.types.is_integer(left.type) and pa.types.is_integer(right.type): result = result.cast(left.type) return result
null
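Similarly, a sketch exercising floordiv_compat with the function above in scope: integer floor division keeps the integer type, mirroring Python/NumPy behavior rather than returning a float.

import pyarrow as pa

left = pa.chunked_array([pa.array([7, 8, 9], type=pa.int64())])
right = pa.scalar(2, type=pa.int64())

out = floordiv_compat(left, right)
# Expected: values [3, 4, 4] with out.type == pa.int64(), because the result of
# floor(divide(...)) is cast back to the left operand's integer type.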
173,287
from __future__ import annotations from copy import deepcopy import operator import re from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, cast, ) import numpy as np from pandas._libs import lib from pandas._typing import ( ArrayLike, AxisInt, Dtype, FillnaOptions, Iterator, NpDtype, PositionalIndexer, Scalar, SortKind, TakeIndexer, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.compat import ( pa_version_under7p0, pa_version_under8p0, pa_version_under9p0, pa_version_under11p0, ) from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_integer, is_integer_dtype, is_list_like, is_object_dtype, is_scalar, ) from pandas.core.dtypes.missing import isna from pandas.core import roperator from pandas.core.arraylike import OpsMixin from pandas.core.arrays.base import ( ExtensionArray, ExtensionArraySupportsAnyAll, ) import pandas.core.common as com from pandas.core.indexers import ( check_array_indexer, unpack_tuple_and_ellipses, validate_indices, ) from pandas.core.strings.base import BaseStringArrayMethods from pandas.tseries.frequencies import to_offset def get_unit_from_pa_dtype(pa_dtype): # https://github.com/pandas-dev/pandas/pull/50998#discussion_r1100344804 if pa_version_under11p0: unit = str(pa_dtype).split("[", 1)[-1][:-1] if unit not in ["s", "ms", "us", "ns"]: raise ValueError(pa_dtype) return unit return pa_dtype.unit
null
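Illustration of get_unit_from_pa_dtype (function above in scope): on pyarrow >= 11 the unit is read directly from the dtype object, while older versions fall back to parsing the string form such as "timestamp[us]".

import pyarrow as pa

get_unit_from_pa_dtype(pa.timestamp("us"))   # "us"
get_unit_from_pa_dtype(pa.duration("ms"))    # "ms"
# A dtype whose parsed unit is not one of s/ms/us/ns raises ValueError on old pyarrow.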
173,288
from __future__ import annotations import warnings import numpy as np import pyarrow from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level class PerformanceWarning(Warning): """ Warning raised when there is a possible performance impact. """ def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n The provided code snippet includes necessary dependencies for implementing the `fallback_performancewarning` function. Write a Python function `def fallback_performancewarning(version: str | None = None) -> None` to solve the following problem: Raise a PerformanceWarning for falling back to ExtensionArray's non-pyarrow method Here is the function: def fallback_performancewarning(version: str | None = None) -> None: """ Raise a PerformanceWarning for falling back to ExtensionArray's non-pyarrow method """ msg = "Falling back on a non-pyarrow code path which may decrease performance." if version is not None: msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning." warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
Raise a PerformanceWarning for falling back to ExtensionArray's non-pyarrow method
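A sketch of the intended call pattern for fallback_performancewarning: emit a PerformanceWarning just before taking the slower non-pyarrow ExtensionArray path. The catch_warnings wrapper is only there to make the example self-contained.

import warnings
from pandas.errors import PerformanceWarning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    fallback_performancewarning(version="9")  # function defined above

assert caught[0].category is PerformanceWarning
# The message suggests upgrading to pyarrow >=9 to possibly suppress the warning.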
173,289
from __future__ import annotations import warnings import numpy as np import pyarrow from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level The provided code snippet includes necessary dependencies for implementing the `pyarrow_array_to_numpy_and_mask` function. Write a Python function `def pyarrow_array_to_numpy_and_mask( arr, dtype: np.dtype ) -> tuple[np.ndarray, np.ndarray]` to solve the following problem: Convert a primitive pyarrow.Array to a numpy array and boolean mask based on the buffers of the Array. At the moment pyarrow.BooleanArray is not supported. Parameters ---------- arr : pyarrow.Array dtype : numpy.dtype Returns ------- (data, mask) Tuple of two numpy arrays with the raw data (with specified dtype) and a boolean mask (validity mask, so False means missing) Here is the function: def pyarrow_array_to_numpy_and_mask( arr, dtype: np.dtype ) -> tuple[np.ndarray, np.ndarray]: """ Convert a primitive pyarrow.Array to a numpy array and boolean mask based on the buffers of the Array. At the moment pyarrow.BooleanArray is not supported. Parameters ---------- arr : pyarrow.Array dtype : numpy.dtype Returns ------- (data, mask) Tuple of two numpy arrays with the raw data (with specified dtype) and a boolean mask (validity mask, so False means missing) """ dtype = np.dtype(dtype) buflist = arr.buffers() # Since Arrow buffers might contain padding and the data might be offset, # the buffer gets sliced here before handing it to numpy. # See also https://github.com/pandas-dev/pandas/issues/40896 offset = arr.offset * dtype.itemsize length = len(arr) * dtype.itemsize data_buf = buflist[1][offset : offset + length] data = np.frombuffer(data_buf, dtype=dtype) bitmask = buflist[0] if bitmask is not None: mask = pyarrow.BooleanArray.from_buffers( pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset ) mask = np.asarray(mask) else: mask = np.ones(len(arr), dtype=bool) return data, mask
Convert a primitive pyarrow.Array to a numpy array and boolean mask based on the buffers of the Array. At the moment pyarrow.BooleanArray is not supported. Parameters ---------- arr : pyarrow.Array dtype : numpy.dtype Returns ------- (data, mask) Tuple of two numpy arrays with the raw data (with specified dtype) and a boolean mask (validity mask, so False means missing)
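An illustrative round trip through pyarrow_array_to_numpy_and_mask (function above in scope, pyarrow installed): nulls show up as False in the validity mask, and the data slot behind a null is unspecified (commonly 0).

import numpy as np
import pyarrow

arr = pyarrow.array([1, None, 3], type=pyarrow.int64())
data, mask = pyarrow_array_to_numpy_and_mask(arr, np.dtype("int64"))

assert mask.tolist() == [True, False, True]
assert data[0] == 1 and data[2] == 3  # data[1] is undefined because the value is null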
173,290
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com def _field_accessor(name: str, docstring=None): def f(self): base = self.freq._period_dtype_code result = get_period_field_arr(name, self.asi8, base) return result f.__name__ = name f.__doc__ = docstring return property(f)
null
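A conceptual sketch of what _field_accessor wires up on PeriodArray: each call builds a read-only property that pulls one field out of the stored ordinals via get_period_field_arr. Using the public period_range constructor for illustration:

import pandas as pd

parr = pd.period_range("2020-01", periods=3, freq="M").array  # PeriodArray

parr.month  # array([1, 2, 3]) from the "month" accessor built by _field_accessor
parr.year   # array([2020, 2020, 2020])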
173,291
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): """ Pandas ExtensionArray for storing Period data. Users should use :func:`~pandas.period_array` to create new instances. Alternatively, :func:`~pandas.array` can be used to create new instances from a sequence of Period scalars. Parameters ---------- values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex] The data to store. These should be arrays that can be directly converted to ordinals without inference or copy (PeriodArray, ndarray[int64]), or a box around such an array (Series[period], PeriodIndex). dtype : PeriodDtype, optional A PeriodDtype instance from which to extract a `freq`. If both `freq` and `dtype` are specified, then the frequencies must match. freq : str or DateOffset The `freq` to use for the array. Mostly applicable when `values` is an ndarray of integers, when `freq` is required. When `values` is a PeriodArray (or box around), it's checked that ``values.freq`` matches `freq`. copy : bool, default False Whether to copy the ordinals before storing. Attributes ---------- None Methods ------- None See Also -------- Period: Represents a period of time. PeriodIndex : Immutable Index for period data. period_range: Create a fixed-frequency PeriodArray. array: Construct a pandas array. Notes ----- There are two components to a PeriodArray - ordinals : integer ndarray - freq : pd.tseries.offsets.Offset The values are physically stored as a 1-D ndarray of integers. These are called "ordinals" and represent some kind of offset from a base. The `freq` indicates the span covered by each element of the array. All elements in the PeriodArray have the same `freq`. 
""" # array priority higher than numpy scalars __array_priority__ = 1000 _typ = "periodarray" # ABCPeriodArray _internal_fill_value = np.int64(iNaT) _recognized_scalars = (Period,) _is_recognized_dtype = is_period_dtype # check_compatible_with checks freq match _infer_matches = ("period",) def _scalar_type(self) -> type[Period]: return Period # Names others delegate to us _other_ops: list[str] = [] _bool_ops: list[str] = ["is_leap_year"] _object_ops: list[str] = ["start_time", "end_time", "freq"] _field_ops: list[str] = [ "year", "month", "day", "hour", "minute", "second", "weekofyear", "weekday", "week", "dayofweek", "day_of_week", "dayofyear", "day_of_year", "quarter", "qyear", "days_in_month", "daysinmonth", ] _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops _datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"] _dtype: PeriodDtype # -------------------------------------------------------------------- # Constructors def __init__( self, values, dtype: Dtype | None = None, freq=None, copy: bool = False ) -> None: freq = validate_dtype_freq(dtype, freq) if freq is not None: freq = Period._maybe_convert_freq(freq) if isinstance(values, ABCSeries): values = values._values if not isinstance(values, type(self)): raise TypeError("Incorrect dtype") elif isinstance(values, ABCPeriodIndex): values = values._values if isinstance(values, type(self)): if freq is not None and freq != values.freq: raise raise_on_incompatible(values, freq) values, freq = values._ndarray, values.freq values = np.array(values, dtype="int64", copy=copy) if freq is None: raise ValueError("freq is not specified and cannot be inferred") NDArrayBacked.__init__(self, values, PeriodDtype(freq)) # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" def _simple_new( # type: ignore[override] cls, values: np.ndarray, freq: BaseOffset | None = None, dtype: Dtype | None = None, ) -> PeriodArray: # alias for PeriodArray.__init__ assertion_msg = "Should be numpy array of type i8" assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg return cls(values, freq=freq, dtype=dtype) def _from_sequence( cls: type[PeriodArray], scalars: Sequence[Period | None] | AnyArrayLike, *, dtype: Dtype | None = None, copy: bool = False, ) -> PeriodArray: if dtype and isinstance(dtype, PeriodDtype): freq = dtype.freq else: freq = None if isinstance(scalars, cls): validate_dtype_freq(scalars.dtype, freq) if copy: scalars = scalars.copy() return scalars periods = np.asarray(scalars, dtype=object) freq = freq or libperiod.extract_freq(periods) ordinals = libperiod.extract_ordinals(periods, freq) return cls(ordinals, freq=freq) def _from_sequence_of_strings( cls, strings, *, dtype: Dtype | None = None, copy: bool = False ) -> PeriodArray: return cls._from_sequence(strings, dtype=dtype, copy=copy) def _from_datetime64(cls, data, freq, tz=None) -> PeriodArray: """ Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq] """ data, freq = dt64arr_to_periodarr(data, freq, tz) return cls(data, freq=freq) def _generate_range(cls, start, end, periods, freq, fields): periods = dtl.validate_periods(periods) if freq is not None: freq = Period._maybe_convert_freq(freq) field_count = len(fields) if start is not None or end is not None: if field_count > 0: raise ValueError( "Can either instantiate from fields or endpoints, but not both" ) subarr, freq = 
_get_ordinal_range(start, end, periods, freq) elif field_count > 0: subarr, freq = _range_from_fields(freq=freq, **fields) else: raise ValueError("Not enough parameters to construct Period range") return subarr, freq # ----------------------------------------------------------------- # DatetimeLike Interface # error: Argument 1 of "_unbox_scalar" is incompatible with supertype # "DatetimeLikeArrayMixin"; supertype defines the argument type as # "Union[Union[Period, Any, Timedelta], NaTType]" def _unbox_scalar( # type: ignore[override] self, value: Period | NaTType, ) -> np.int64: if value is NaT: # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value" return np.int64(value._value) # type: ignore[union-attr] elif isinstance(value, self._scalar_type): self._check_compatible_with(value) return np.int64(value.ordinal) else: raise ValueError(f"'value' should be a Period. Got '{value}' instead.") def _scalar_from_string(self, value: str) -> Period: return Period(value, freq=self.freq) def _check_compatible_with(self, other) -> None: if other is NaT: return self._require_matching_freq(other) # -------------------------------------------------------------------- # Data / Attributes def dtype(self) -> PeriodDtype: return self._dtype # error: Cannot override writeable attribute with read-only property def freq(self) -> BaseOffset: """ Return the frequency object for this PeriodArray. """ return self.dtype.freq def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: if dtype == "i8": return self.asi8 elif dtype == bool: return ~self._isnan # This will raise TypeError for non-object dtypes return np.array(list(self), dtype=object) def __arrow_array__(self, type=None): """ Convert myself into a pyarrow Array. """ import pyarrow from pandas.core.arrays.arrow.extension_types import ArrowPeriodType if type is not None: if pyarrow.types.is_integer(type): return pyarrow.array(self._ndarray, mask=self.isna(), type=type) elif isinstance(type, ArrowPeriodType): # ensure we have the same freq if self.freqstr != type.freq: raise TypeError( "Not supported to convert PeriodArray to array with different " f"'freq' ({self.freqstr} vs {type.freq})" ) else: raise TypeError( f"Not supported to convert PeriodArray to '{type}' type" ) period_type = ArrowPeriodType(self.freqstr) storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64") return pyarrow.ExtensionArray.from_storage(period_type, storage_array) # -------------------------------------------------------------------- # Vectorized analogues of Period properties year = _field_accessor( "year", """ The year of the period. """, ) month = _field_accessor( "month", """ The month as January=1, December=12. """, ) day = _field_accessor( "day", """ The days of the period. """, ) hour = _field_accessor( "hour", """ The hour of the period. """, ) minute = _field_accessor( "minute", """ The minute of the period. """, ) second = _field_accessor( "second", """ The second of the period. """, ) weekofyear = _field_accessor( "week", """ The week ordinal of the year. """, ) week = weekofyear day_of_week = _field_accessor( "day_of_week", """ The day of the week with Monday=0, Sunday=6. """, ) dayofweek = day_of_week weekday = dayofweek dayofyear = day_of_year = _field_accessor( "day_of_year", """ The ordinal day of the year. """, ) quarter = _field_accessor( "quarter", """ The quarter of the date. """, ) qyear = _field_accessor("qyear") days_in_month = _field_accessor( "days_in_month", """ The number of days in the month. 
""", ) daysinmonth = days_in_month def is_leap_year(self) -> np.ndarray: """ Logical indicating if the date belongs to a leap year. """ return isleapyear_arr(np.asarray(self.year)) def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: """ Cast to DatetimeArray/Index. Parameters ---------- freq : str or DateOffset, optional Target frequency. The default is 'D' for week or longer, 'S' otherwise. how : {'s', 'e', 'start', 'end'} Whether to use the start or end of the time period being converted. Returns ------- DatetimeArray/Index """ from pandas.core.arrays import DatetimeArray how = libperiod.validate_end_alias(how) end = how == "E" if end: if freq == "B" or self.freq == "B": # roll forward to ensure we land on B date adjust = Timedelta(1, "D") - Timedelta(1, "ns") return self.to_timestamp(how="start") + adjust else: adjust = Timedelta(1, "ns") return (self + self.freq).to_timestamp(how="start") - adjust if freq is None: freq = self._dtype._get_to_timestamp_base() base = freq else: freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code new_parr = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) dta = DatetimeArray(new_data) if self.freq.name == "B": # See if we can retain BDay instead of Day in cases where # len(self) is too small for infer_freq to distinguish between them diffs = libalgos.unique_deltas(self.asi8) if len(diffs) == 1: diff = diffs[0] if diff == self.freq.n: dta._freq = self.freq elif diff == 1: dta._freq = self.freq.base # TODO: other cases? return dta else: return dta._with_freq("infer") # -------------------------------------------------------------------- def _box_func(self, x) -> Period | NaTType: return Period._from_ordinal(ordinal=x, freq=self.freq) def asfreq(self, freq=None, how: str = "E") -> PeriodArray: """ Convert the {klass} to the specified frequency `freq`. Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments to each :class:`~pandas.Period` in this {klass}. Parameters ---------- freq : str A frequency. how : str {{'E', 'S'}}, default 'E' Whether the elements should be aligned to the end or start within pa period. * 'E', 'END', or 'FINISH' for end, * 'S', 'START', or 'BEGIN' for start. January 31st ('END') vs. January 1st ('START') for example. Returns ------- {klass} The transformed {klass} with the new frequency. See Also -------- {other}.asfreq: Convert each Period in a {other_name} to the given frequency. Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency. 
Examples -------- >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') >>> pidx PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], dtype='period[A-DEC]') >>> pidx.asfreq('M') PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', '2015-12'], dtype='period[M]') >>> pidx.asfreq('M', how='S') PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', '2015-01'], dtype='period[M]') """ how = libperiod.validate_end_alias(how) freq = Period._maybe_convert_freq(freq) base1 = self._dtype._dtype_code base2 = freq._period_dtype_code asi8 = self.asi8 # self.freq.n can't be negative or 0 end = how == "E" if end: ordinal = asi8 + self.freq.n - 1 else: ordinal = asi8 new_data = period_asfreq_arr(ordinal, base1, base2, end) if self._hasna: new_data[self._isnan] = iNaT return type(self)(new_data, freq=freq) # ------------------------------------------------------------------ # Rendering Methods def _formatter(self, boxed: bool = False): if boxed: return str return "'{}'".format def _format_native_types( self, *, na_rep: str | float = "NaT", date_format=None, **kwargs ) -> npt.NDArray[np.object_]: """ actually format my specific types """ values = self.astype(object) # Create the formatter function if date_format: formatter = lambda per: per.strftime(date_format) else: # Uses `_Period.str` which in turn uses `format_period` formatter = lambda per: str(per) # Apply the formatter to all values in the array, possibly with a mask if self._hasna: mask = self._isnan values[mask] = na_rep imask = ~mask values[imask] = np.array([formatter(per) for per in values[imask]]) else: values = np.array([formatter(per) for per in values]) return values # ------------------------------------------------------------------ def astype(self, dtype, copy: bool = True): # We handle Period[T] -> Period[U] # Our parent handles everything else. dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, self._dtype): if not copy: return self else: return self.copy() if is_period_dtype(dtype): return self.asfreq(dtype.freq) if is_datetime64_any_dtype(dtype): # GH#45038 match PeriodIndex behavior. tz = getattr(dtype, "tz", None) return self.to_timestamp().tz_localize(tz) return super().astype(dtype, copy=copy) def searchsorted( self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: npvalue = self._validate_setitem_value(value).view("M8[ns]") # Cast to M8 to get datetime-like NaT placement, # similar to dtl._period_dispatch m8arr = self._ndarray.view("M8[ns]") return m8arr.searchsorted(npvalue, side=side, sorter=sorter) def fillna(self, value=None, method=None, limit=None) -> PeriodArray: if method is not None: # view as dt64 so we get treated as timelike in core.missing, # similar to dtl._period_dispatch dta = self.view("M8[ns]") result = dta.fillna(value=value, method=method, limit=limit) # error: Incompatible return value type (got "Union[ExtensionArray, # ndarray[Any, Any]]", expected "PeriodArray") return result.view(self.dtype) # type: ignore[return-value] return super().fillna(value=value, method=method, limit=limit) # ------------------------------------------------------------------ # Arithmetic Methods def _addsub_int_array_or_scalar( self, other: np.ndarray | int, op: Callable[[Any, Any], Any] ) -> PeriodArray: """ Add or subtract array of integers. 
Parameters ---------- other : np.ndarray[int64] or int op : {operator.add, operator.sub} Returns ------- result : PeriodArray """ assert op in [operator.add, operator.sub] if op is operator.sub: other = -other res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan) return type(self)(res_values, freq=self.freq) def _add_offset(self, other: BaseOffset): assert not isinstance(other, Tick) self._require_matching_freq(other, base=True) return self._addsub_int_array_or_scalar(other.n, operator.add) # TODO: can we de-duplicate with Period._add_timedeltalike_scalar? def _add_timedeltalike_scalar(self, other): """ Parameters ---------- other : timedelta, Tick, np.timedelta64 Returns ------- PeriodArray """ if not isinstance(self.freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise raise_on_incompatible(self, other) if isna(other): # i.e. np.timedelta64("NaT") return super()._add_timedeltalike_scalar(other) td = np.asarray(Timedelta(other).asm8) return self._add_timedelta_arraylike(td) def _add_timedelta_arraylike( self, other: TimedeltaArray | npt.NDArray[np.timedelta64] ) -> PeriodArray: """ Parameters ---------- other : TimedeltaArray or ndarray[timedelta64] Returns ------- PeriodArray """ freq = self.freq if not isinstance(freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise TypeError( f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}" ) dtype = np.dtype(f"m8[{freq._td64_unit}]") try: delta = astype_overflowsafe( np.asarray(other), dtype=dtype, copy=False, round_ok=False ) except ValueError as err: # e.g. if we have minutes freq and try to add 30s # "Cannot losslessly convert units" raise IncompatibleFrequency( "Cannot add/subtract timedelta-like from PeriodArray that is " "not an integer multiple of the PeriodArray's freq." ) from err b_mask = np.isnat(delta) res_values = algos.checked_add_with_arr( self.asi8, delta.view("i8"), arr_mask=self._isnan, b_mask=b_mask ) np.putmask(res_values, self._isnan | b_mask, iNaT) return type(self)(res_values, freq=self.freq) def _check_timedeltalike_freq_compat(self, other): """ Arithmetic operations with timedelta-like scalars or array `other` are only valid if `other` is an integer multiple of `self.freq`. If the operation is valid, find that integer multiple. Otherwise, raise because the operation is invalid. Parameters ---------- other : timedelta, np.timedelta64, Tick, ndarray[timedelta64], TimedeltaArray, TimedeltaIndex Returns ------- multiple : int or ndarray[int64] Raises ------ IncompatibleFrequency """ assert isinstance(self.freq, Tick) # checked by calling function dtype = np.dtype(f"m8[{self.freq._td64_unit}]") if isinstance(other, (timedelta, np.timedelta64, Tick)): td = np.asarray(Timedelta(other).asm8) else: td = np.asarray(other) try: delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False) except ValueError as err: raise raise_on_incompatible(self, other) from err delta = delta.view("i8") return lib.item_from_zerodim(delta) ABCPeriodIndex = cast( "Type[PeriodIndex]", create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)), ) ABCTimedeltaArray = cast( "Type[TimedeltaArray]", create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray")), ) The provided code snippet includes necessary dependencies for implementing the `raise_on_incompatible` function. 
Write a Python function `def raise_on_incompatible(left, right)` to solve the following problem: Helper function to render a consistent error message when raising IncompatibleFrequency. Parameters ---------- left : PeriodArray right : None, DateOffset, Period, ndarray, or timedelta-like Returns ------- IncompatibleFrequency Exception to be raised by the caller. Here is the function: def raise_on_incompatible(left, right): """ Helper function to render a consistent error message when raising IncompatibleFrequency. Parameters ---------- left : PeriodArray right : None, DateOffset, Period, ndarray, or timedelta-like Returns ------- IncompatibleFrequency Exception to be raised by the caller. """ # GH#24283 error message format depends on whether right is scalar if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None: other_freq = None elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)): other_freq = right.freqstr else: other_freq = delta_to_tick(Timedelta(right)).freqstr msg = DIFFERENT_FREQ.format( cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq ) return IncompatibleFrequency(msg)
Helper function to render a consistent error message when raising IncompatibleFrequency. Parameters ---------- left : PeriodArray right : None, DateOffset, Period, ndarray, or timedelta-like Returns ------- IncompatibleFrequency Exception to be raised by the caller.
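A hedged sketch of how raise_on_incompatible is used (function above in scope): it constructs, rather than raises, an IncompatibleFrequency whose message names both frequencies, and the caller then raises the returned exception.

import pandas as pd

parr = pd.period_range("2020", periods=3, freq="Y").array   # annual-frequency PeriodArray
err = raise_on_incompatible(parr, pd.offsets.MonthEnd())    # offset with freqstr "M"

# err is an IncompatibleFrequency; a caller would typically do:  raise err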
173,292
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): """ Pandas ExtensionArray for storing Period data. Users should use :func:`~pandas.period_array` to create new instances. Alternatively, :func:`~pandas.array` can be used to create new instances from a sequence of Period scalars. Parameters ---------- values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex] The data to store. These should be arrays that can be directly converted to ordinals without inference or copy (PeriodArray, ndarray[int64]), or a box around such an array (Series[period], PeriodIndex). dtype : PeriodDtype, optional A PeriodDtype instance from which to extract a `freq`. If both `freq` and `dtype` are specified, then the frequencies must match. freq : str or DateOffset The `freq` to use for the array. Mostly applicable when `values` is an ndarray of integers, when `freq` is required. When `values` is a PeriodArray (or box around), it's checked that ``values.freq`` matches `freq`. copy : bool, default False Whether to copy the ordinals before storing. Attributes ---------- None Methods ------- None See Also -------- Period: Represents a period of time. PeriodIndex : Immutable Index for period data. period_range: Create a fixed-frequency PeriodArray. array: Construct a pandas array. Notes ----- There are two components to a PeriodArray - ordinals : integer ndarray - freq : pd.tseries.offsets.Offset The values are physically stored as a 1-D ndarray of integers. These are called "ordinals" and represent some kind of offset from a base. The `freq` indicates the span covered by each element of the array. All elements in the PeriodArray have the same `freq`. 
""" # array priority higher than numpy scalars __array_priority__ = 1000 _typ = "periodarray" # ABCPeriodArray _internal_fill_value = np.int64(iNaT) _recognized_scalars = (Period,) _is_recognized_dtype = is_period_dtype # check_compatible_with checks freq match _infer_matches = ("period",) def _scalar_type(self) -> type[Period]: return Period # Names others delegate to us _other_ops: list[str] = [] _bool_ops: list[str] = ["is_leap_year"] _object_ops: list[str] = ["start_time", "end_time", "freq"] _field_ops: list[str] = [ "year", "month", "day", "hour", "minute", "second", "weekofyear", "weekday", "week", "dayofweek", "day_of_week", "dayofyear", "day_of_year", "quarter", "qyear", "days_in_month", "daysinmonth", ] _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops _datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"] _dtype: PeriodDtype # -------------------------------------------------------------------- # Constructors def __init__( self, values, dtype: Dtype | None = None, freq=None, copy: bool = False ) -> None: freq = validate_dtype_freq(dtype, freq) if freq is not None: freq = Period._maybe_convert_freq(freq) if isinstance(values, ABCSeries): values = values._values if not isinstance(values, type(self)): raise TypeError("Incorrect dtype") elif isinstance(values, ABCPeriodIndex): values = values._values if isinstance(values, type(self)): if freq is not None and freq != values.freq: raise raise_on_incompatible(values, freq) values, freq = values._ndarray, values.freq values = np.array(values, dtype="int64", copy=copy) if freq is None: raise ValueError("freq is not specified and cannot be inferred") NDArrayBacked.__init__(self, values, PeriodDtype(freq)) # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" def _simple_new( # type: ignore[override] cls, values: np.ndarray, freq: BaseOffset | None = None, dtype: Dtype | None = None, ) -> PeriodArray: # alias for PeriodArray.__init__ assertion_msg = "Should be numpy array of type i8" assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg return cls(values, freq=freq, dtype=dtype) def _from_sequence( cls: type[PeriodArray], scalars: Sequence[Period | None] | AnyArrayLike, *, dtype: Dtype | None = None, copy: bool = False, ) -> PeriodArray: if dtype and isinstance(dtype, PeriodDtype): freq = dtype.freq else: freq = None if isinstance(scalars, cls): validate_dtype_freq(scalars.dtype, freq) if copy: scalars = scalars.copy() return scalars periods = np.asarray(scalars, dtype=object) freq = freq or libperiod.extract_freq(periods) ordinals = libperiod.extract_ordinals(periods, freq) return cls(ordinals, freq=freq) def _from_sequence_of_strings( cls, strings, *, dtype: Dtype | None = None, copy: bool = False ) -> PeriodArray: return cls._from_sequence(strings, dtype=dtype, copy=copy) def _from_datetime64(cls, data, freq, tz=None) -> PeriodArray: """ Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq] """ data, freq = dt64arr_to_periodarr(data, freq, tz) return cls(data, freq=freq) def _generate_range(cls, start, end, periods, freq, fields): periods = dtl.validate_periods(periods) if freq is not None: freq = Period._maybe_convert_freq(freq) field_count = len(fields) if start is not None or end is not None: if field_count > 0: raise ValueError( "Can either instantiate from fields or endpoints, but not both" ) subarr, freq = 
_get_ordinal_range(start, end, periods, freq) elif field_count > 0: subarr, freq = _range_from_fields(freq=freq, **fields) else: raise ValueError("Not enough parameters to construct Period range") return subarr, freq # ----------------------------------------------------------------- # DatetimeLike Interface # error: Argument 1 of "_unbox_scalar" is incompatible with supertype # "DatetimeLikeArrayMixin"; supertype defines the argument type as # "Union[Union[Period, Any, Timedelta], NaTType]" def _unbox_scalar( # type: ignore[override] self, value: Period | NaTType, ) -> np.int64: if value is NaT: # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value" return np.int64(value._value) # type: ignore[union-attr] elif isinstance(value, self._scalar_type): self._check_compatible_with(value) return np.int64(value.ordinal) else: raise ValueError(f"'value' should be a Period. Got '{value}' instead.") def _scalar_from_string(self, value: str) -> Period: return Period(value, freq=self.freq) def _check_compatible_with(self, other) -> None: if other is NaT: return self._require_matching_freq(other) # -------------------------------------------------------------------- # Data / Attributes def dtype(self) -> PeriodDtype: return self._dtype # error: Cannot override writeable attribute with read-only property def freq(self) -> BaseOffset: """ Return the frequency object for this PeriodArray. """ return self.dtype.freq def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: if dtype == "i8": return self.asi8 elif dtype == bool: return ~self._isnan # This will raise TypeError for non-object dtypes return np.array(list(self), dtype=object) def __arrow_array__(self, type=None): """ Convert myself into a pyarrow Array. """ import pyarrow from pandas.core.arrays.arrow.extension_types import ArrowPeriodType if type is not None: if pyarrow.types.is_integer(type): return pyarrow.array(self._ndarray, mask=self.isna(), type=type) elif isinstance(type, ArrowPeriodType): # ensure we have the same freq if self.freqstr != type.freq: raise TypeError( "Not supported to convert PeriodArray to array with different " f"'freq' ({self.freqstr} vs {type.freq})" ) else: raise TypeError( f"Not supported to convert PeriodArray to '{type}' type" ) period_type = ArrowPeriodType(self.freqstr) storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64") return pyarrow.ExtensionArray.from_storage(period_type, storage_array) # -------------------------------------------------------------------- # Vectorized analogues of Period properties year = _field_accessor( "year", """ The year of the period. """, ) month = _field_accessor( "month", """ The month as January=1, December=12. """, ) day = _field_accessor( "day", """ The days of the period. """, ) hour = _field_accessor( "hour", """ The hour of the period. """, ) minute = _field_accessor( "minute", """ The minute of the period. """, ) second = _field_accessor( "second", """ The second of the period. """, ) weekofyear = _field_accessor( "week", """ The week ordinal of the year. """, ) week = weekofyear day_of_week = _field_accessor( "day_of_week", """ The day of the week with Monday=0, Sunday=6. """, ) dayofweek = day_of_week weekday = dayofweek dayofyear = day_of_year = _field_accessor( "day_of_year", """ The ordinal day of the year. """, ) quarter = _field_accessor( "quarter", """ The quarter of the date. """, ) qyear = _field_accessor("qyear") days_in_month = _field_accessor( "days_in_month", """ The number of days in the month. 
""", ) daysinmonth = days_in_month def is_leap_year(self) -> np.ndarray: """ Logical indicating if the date belongs to a leap year. """ return isleapyear_arr(np.asarray(self.year)) def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: """ Cast to DatetimeArray/Index. Parameters ---------- freq : str or DateOffset, optional Target frequency. The default is 'D' for week or longer, 'S' otherwise. how : {'s', 'e', 'start', 'end'} Whether to use the start or end of the time period being converted. Returns ------- DatetimeArray/Index """ from pandas.core.arrays import DatetimeArray how = libperiod.validate_end_alias(how) end = how == "E" if end: if freq == "B" or self.freq == "B": # roll forward to ensure we land on B date adjust = Timedelta(1, "D") - Timedelta(1, "ns") return self.to_timestamp(how="start") + adjust else: adjust = Timedelta(1, "ns") return (self + self.freq).to_timestamp(how="start") - adjust if freq is None: freq = self._dtype._get_to_timestamp_base() base = freq else: freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code new_parr = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) dta = DatetimeArray(new_data) if self.freq.name == "B": # See if we can retain BDay instead of Day in cases where # len(self) is too small for infer_freq to distinguish between them diffs = libalgos.unique_deltas(self.asi8) if len(diffs) == 1: diff = diffs[0] if diff == self.freq.n: dta._freq = self.freq elif diff == 1: dta._freq = self.freq.base # TODO: other cases? return dta else: return dta._with_freq("infer") # -------------------------------------------------------------------- def _box_func(self, x) -> Period | NaTType: return Period._from_ordinal(ordinal=x, freq=self.freq) def asfreq(self, freq=None, how: str = "E") -> PeriodArray: """ Convert the {klass} to the specified frequency `freq`. Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments to each :class:`~pandas.Period` in this {klass}. Parameters ---------- freq : str A frequency. how : str {{'E', 'S'}}, default 'E' Whether the elements should be aligned to the end or start within pa period. * 'E', 'END', or 'FINISH' for end, * 'S', 'START', or 'BEGIN' for start. January 31st ('END') vs. January 1st ('START') for example. Returns ------- {klass} The transformed {klass} with the new frequency. See Also -------- {other}.asfreq: Convert each Period in a {other_name} to the given frequency. Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency. 
Examples -------- >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') >>> pidx PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], dtype='period[A-DEC]') >>> pidx.asfreq('M') PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', '2015-12'], dtype='period[M]') >>> pidx.asfreq('M', how='S') PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', '2015-01'], dtype='period[M]') """ how = libperiod.validate_end_alias(how) freq = Period._maybe_convert_freq(freq) base1 = self._dtype._dtype_code base2 = freq._period_dtype_code asi8 = self.asi8 # self.freq.n can't be negative or 0 end = how == "E" if end: ordinal = asi8 + self.freq.n - 1 else: ordinal = asi8 new_data = period_asfreq_arr(ordinal, base1, base2, end) if self._hasna: new_data[self._isnan] = iNaT return type(self)(new_data, freq=freq) # ------------------------------------------------------------------ # Rendering Methods def _formatter(self, boxed: bool = False): if boxed: return str return "'{}'".format def _format_native_types( self, *, na_rep: str | float = "NaT", date_format=None, **kwargs ) -> npt.NDArray[np.object_]: """ actually format my specific types """ values = self.astype(object) # Create the formatter function if date_format: formatter = lambda per: per.strftime(date_format) else: # Uses `_Period.str` which in turn uses `format_period` formatter = lambda per: str(per) # Apply the formatter to all values in the array, possibly with a mask if self._hasna: mask = self._isnan values[mask] = na_rep imask = ~mask values[imask] = np.array([formatter(per) for per in values[imask]]) else: values = np.array([formatter(per) for per in values]) return values # ------------------------------------------------------------------ def astype(self, dtype, copy: bool = True): # We handle Period[T] -> Period[U] # Our parent handles everything else. dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, self._dtype): if not copy: return self else: return self.copy() if is_period_dtype(dtype): return self.asfreq(dtype.freq) if is_datetime64_any_dtype(dtype): # GH#45038 match PeriodIndex behavior. tz = getattr(dtype, "tz", None) return self.to_timestamp().tz_localize(tz) return super().astype(dtype, copy=copy) def searchsorted( self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: npvalue = self._validate_setitem_value(value).view("M8[ns]") # Cast to M8 to get datetime-like NaT placement, # similar to dtl._period_dispatch m8arr = self._ndarray.view("M8[ns]") return m8arr.searchsorted(npvalue, side=side, sorter=sorter) def fillna(self, value=None, method=None, limit=None) -> PeriodArray: if method is not None: # view as dt64 so we get treated as timelike in core.missing, # similar to dtl._period_dispatch dta = self.view("M8[ns]") result = dta.fillna(value=value, method=method, limit=limit) # error: Incompatible return value type (got "Union[ExtensionArray, # ndarray[Any, Any]]", expected "PeriodArray") return result.view(self.dtype) # type: ignore[return-value] return super().fillna(value=value, method=method, limit=limit) # ------------------------------------------------------------------ # Arithmetic Methods def _addsub_int_array_or_scalar( self, other: np.ndarray | int, op: Callable[[Any, Any], Any] ) -> PeriodArray: """ Add or subtract array of integers. 
Parameters ---------- other : np.ndarray[int64] or int op : {operator.add, operator.sub} Returns ------- result : PeriodArray """ assert op in [operator.add, operator.sub] if op is operator.sub: other = -other res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan) return type(self)(res_values, freq=self.freq) def _add_offset(self, other: BaseOffset): assert not isinstance(other, Tick) self._require_matching_freq(other, base=True) return self._addsub_int_array_or_scalar(other.n, operator.add) # TODO: can we de-duplicate with Period._add_timedeltalike_scalar? def _add_timedeltalike_scalar(self, other): """ Parameters ---------- other : timedelta, Tick, np.timedelta64 Returns ------- PeriodArray """ if not isinstance(self.freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise raise_on_incompatible(self, other) if isna(other): # i.e. np.timedelta64("NaT") return super()._add_timedeltalike_scalar(other) td = np.asarray(Timedelta(other).asm8) return self._add_timedelta_arraylike(td) def _add_timedelta_arraylike( self, other: TimedeltaArray | npt.NDArray[np.timedelta64] ) -> PeriodArray: """ Parameters ---------- other : TimedeltaArray or ndarray[timedelta64] Returns ------- PeriodArray """ freq = self.freq if not isinstance(freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise TypeError( f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}" ) dtype = np.dtype(f"m8[{freq._td64_unit}]") try: delta = astype_overflowsafe( np.asarray(other), dtype=dtype, copy=False, round_ok=False ) except ValueError as err: # e.g. if we have minutes freq and try to add 30s # "Cannot losslessly convert units" raise IncompatibleFrequency( "Cannot add/subtract timedelta-like from PeriodArray that is " "not an integer multiple of the PeriodArray's freq." ) from err b_mask = np.isnat(delta) res_values = algos.checked_add_with_arr( self.asi8, delta.view("i8"), arr_mask=self._isnan, b_mask=b_mask ) np.putmask(res_values, self._isnan | b_mask, iNaT) return type(self)(res_values, freq=self.freq) def _check_timedeltalike_freq_compat(self, other): """ Arithmetic operations with timedelta-like scalars or array `other` are only valid if `other` is an integer multiple of `self.freq`. If the operation is valid, find that integer multiple. Otherwise, raise because the operation is invalid. Parameters ---------- other : timedelta, np.timedelta64, Tick, ndarray[timedelta64], TimedeltaArray, TimedeltaIndex Returns ------- multiple : int or ndarray[int64] Raises ------ IncompatibleFrequency """ assert isinstance(self.freq, Tick) # checked by calling function dtype = np.dtype(f"m8[{self.freq._td64_unit}]") if isinstance(other, (timedelta, np.timedelta64, Tick)): td = np.asarray(Timedelta(other).asm8) else: td = np.asarray(other) try: delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False) except ValueError as err: raise raise_on_incompatible(self, other) from err delta = delta.view("i8") return lib.item_from_zerodim(delta) class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... 
AnyArrayLike = Union[ArrayLike, "Index", "Series"] ensure_object = algos.ensure_object def is_datetime64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the datetime64 dtype. Examples -------- >>> from pandas.api.types import is_datetime64_dtype >>> is_datetime64_dtype(object) False >>> is_datetime64_dtype(np.datetime64) True >>> is_datetime64_dtype(np.array([], dtype=int)) False >>> is_datetime64_dtype(np.array([], dtype=np.datetime64)) True >>> is_datetime64_dtype([1, 2, 3]) False """ if isinstance(arr_or_dtype, np.dtype): # GH#33400 fastpath for dtype object return arr_or_dtype.kind == "M" return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) def is_period_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the Period dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Period dtype. Examples -------- >>> is_period_dtype(object) False >>> is_period_dtype(PeriodDtype(freq="D")) True >>> is_period_dtype([1, 2, 3]) False >>> is_period_dtype(pd.Period("2017-01-01")) False >>> is_period_dtype(pd.PeriodIndex([], freq="A")) True """ if isinstance(arr_or_dtype, ExtensionDtype): # GH#33400 fastpath for dtype object return arr_or_dtype.type is Period if arr_or_dtype is None: return False return PeriodDtype.is_dtype(arr_or_dtype) def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. 
Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype): """ An ExtensionDtype for Period data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- freq : str or DateOffset The frequency of this PeriodDtype. Attributes ---------- freq Methods ------- None Examples -------- >>> pd.PeriodDtype(freq='D') period[D] >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd()) period[M] """ type: type[Period] = Period kind: str_type = "O" str = "|O08" base = np.dtype("O") num = 102 _metadata = ("freq",) _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def __new__(cls, freq=None): """ Parameters ---------- freq : frequency """ if isinstance(freq, PeriodDtype): return freq elif freq is None: # empty constructor for pickle compat # -10_000 corresponds to PeriodDtypeCode.UNDEFINED u = PeriodDtypeBase.__new__(cls, -10_000) u._freq = None return u if not isinstance(freq, BaseOffset): freq = cls._parse_dtype_strict(freq) try: return cls._cache_dtypes[freq.freqstr] except KeyError: dtype_code = freq._period_dtype_code u = PeriodDtypeBase.__new__(cls, dtype_code) u._freq = freq cls._cache_dtypes[freq.freqstr] = u return u def __reduce__(self): return type(self), (self.freq,) def freq(self): """ The frequency object of this PeriodDtype. """ return self._freq def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset: if isinstance(freq, str): # note: freq is already of type str! if freq.startswith("period[") or freq.startswith("Period["): m = cls._match.search(freq) if m is not None: freq = m.group("freq") freq_offset = to_offset(freq) if freq_offset is not None: return freq_offset raise ValueError("could not construct PeriodDtype") def construct_from_string(cls, string: str_type) -> PeriodDtype: """ Strict construction from a string, raise a TypeError if not possible """ if ( isinstance(string, str) and (string.startswith("period[") or string.startswith("Period[")) or isinstance(string, BaseOffset) ): # do not parse string like U as period[U] # avoid tuple to be regarded as freq try: return cls(freq=string) except ValueError: pass if isinstance(string, str): msg = f"Cannot construct a 'PeriodDtype' from '{string}'" else: msg = f"'construct_from_string' expects a string, got {type(string)}" raise TypeError(msg) def __str__(self) -> str_type: return self.name def name(self) -> str_type: return f"period[{self.freq.freqstr}]" def na_value(self) -> NaTType: return NaT def __hash__(self) -> int: # make myself hashable return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): return other in [self.name, self.name.title()] elif isinstance(other, PeriodDtype): # For freqs that can be held by a PeriodDtype, this check is # equivalent to (and much faster than) self.freq == other.freq sfreq = self.freq ofreq = other.freq return ( sfreq.n == ofreq.n and sfreq._period_dtype_code == ofreq._period_dtype_code ) return False def __ne__(self, other: Any) -> bool: return not self.__eq__(other) def __setstate__(self, state) -> None: # for pickle compat. 
__getstate__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._freq = state["freq"] def is_dtype(cls, dtype: object) -> bool: """ Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """ if isinstance(dtype, str): # PeriodDtype can be instantiated from freq string like "U", # but doesn't regard freq str like "U" as dtype. if dtype.startswith("period[") or dtype.startswith("Period["): try: return cls._parse_dtype_strict(dtype) is not None except ValueError: return False else: return False return super().is_dtype(dtype) def construct_array_type(cls) -> type_t[PeriodArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import PeriodArray return PeriodArray def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> PeriodArray: """ Construct PeriodArray from pyarrow Array/ChunkedArray. """ import pyarrow from pandas.core.arrays import PeriodArray from pandas.core.arrays.arrow._arrow_utils import ( pyarrow_array_to_numpy_and_mask, ) if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64)) parr = PeriodArray(data.copy(), freq=self.freq, copy=False) # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray"; # expected type "Union[int, Sequence[int], Sequence[bool], slice]" parr[~mask] = NaT # type: ignore[index] results.append(parr) if not results: return PeriodArray(np.array([], dtype="int64"), freq=self.freq, copy=False) return PeriodArray._concat_same_type(results) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `period_array` function. Write a Python function `def period_array( data: Sequence[Period | str | None] | AnyArrayLike, freq: str | Tick | None = None, copy: bool = False, ) -> PeriodArray` to solve the following problem: Construct a new PeriodArray from a sequence of Period scalars. Parameters ---------- data : Sequence of Period objects A sequence of Period objects. These are required to all have the same ``freq.`` Missing values can be indicated by ``None`` or ``pandas.NaT``. freq : str, Tick, or Offset The frequency of every element of the array. This can be specified to avoid inferring the `freq` from `data`. copy : bool, default False Whether to ensure a copy of the data is made. Returns ------- PeriodArray See Also -------- PeriodArray pandas.PeriodIndex Examples -------- >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A')]) <PeriodArray> ['2017', '2018'] Length: 2, dtype: period[A-DEC] >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A'), ... 
pd.NaT]) <PeriodArray> ['2017', '2018', 'NaT'] Length: 3, dtype: period[A-DEC] Integers that look like years are handled >>> period_array([2000, 2001, 2002], freq='D') <PeriodArray> ['2000-01-01', '2001-01-01', '2002-01-01'] Length: 3, dtype: period[D] Datetime-like strings may also be passed >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q') <PeriodArray> ['2000Q1', '2000Q2', '2000Q3', '2000Q4'] Length: 4, dtype: period[Q-DEC] Here is the function: def period_array( data: Sequence[Period | str | None] | AnyArrayLike, freq: str | Tick | None = None, copy: bool = False, ) -> PeriodArray: """ Construct a new PeriodArray from a sequence of Period scalars. Parameters ---------- data : Sequence of Period objects A sequence of Period objects. These are required to all have the same ``freq.`` Missing values can be indicated by ``None`` or ``pandas.NaT``. freq : str, Tick, or Offset The frequency of every element of the array. This can be specified to avoid inferring the `freq` from `data`. copy : bool, default False Whether to ensure a copy of the data is made. Returns ------- PeriodArray See Also -------- PeriodArray pandas.PeriodIndex Examples -------- >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A')]) <PeriodArray> ['2017', '2018'] Length: 2, dtype: period[A-DEC] >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A'), ... pd.NaT]) <PeriodArray> ['2017', '2018', 'NaT'] Length: 3, dtype: period[A-DEC] Integers that look like years are handled >>> period_array([2000, 2001, 2002], freq='D') <PeriodArray> ['2000-01-01', '2001-01-01', '2002-01-01'] Length: 3, dtype: period[D] Datetime-like strings may also be passed >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q') <PeriodArray> ['2000Q1', '2000Q2', '2000Q3', '2000Q4'] Length: 4, dtype: period[Q-DEC] """ data_dtype = getattr(data, "dtype", None) if is_datetime64_dtype(data_dtype): return PeriodArray._from_datetime64(data, freq) if is_period_dtype(data_dtype): return PeriodArray(data, freq=freq) # other iterable of some kind if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)): data = list(data) arrdata = np.asarray(data) dtype: PeriodDtype | None if freq: dtype = PeriodDtype(freq) else: dtype = None if is_float_dtype(arrdata) and len(arrdata) > 0: raise TypeError("PeriodIndex does not allow floating point in construction") if is_integer_dtype(arrdata.dtype): arr = arrdata.astype(np.int64, copy=False) # error: Argument 2 to "from_ordinals" has incompatible type "Union[str, # Tick, None]"; expected "Union[timedelta, BaseOffset, str]" ordinals = libperiod.from_ordinals(arr, freq) # type: ignore[arg-type] return PeriodArray(ordinals, dtype=dtype) data = ensure_object(arrdata) return PeriodArray._from_sequence(data, dtype=dtype)
Construct a new PeriodArray from a sequence of Period scalars. Parameters ---------- data : Sequence of Period objects A sequence of Period objects. These are required to all have the same ``freq.`` Missing values can be indicated by ``None`` or ``pandas.NaT``. freq : str, Tick, or Offset The frequency of every element of the array. This can be specified to avoid inferring the `freq` from `data`. copy : bool, default False Whether to ensure a copy of the data is made. Returns ------- PeriodArray See Also -------- PeriodArray pandas.PeriodIndex Examples -------- >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A')]) <PeriodArray> ['2017', '2018'] Length: 2, dtype: period[A-DEC] >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A'), ... pd.NaT]) <PeriodArray> ['2017', '2018', 'NaT'] Length: 3, dtype: period[A-DEC] Integers that look like years are handled >>> period_array([2000, 2001, 2002], freq='D') <PeriodArray> ['2000-01-01', '2001-01-01', '2002-01-01'] Length: 3, dtype: period[D] Datetime-like strings may also be passed >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q') <PeriodArray> ['2000Q1', '2000Q2', '2000Q3', '2000Q4'] Length: 4, dtype: period[Q-DEC]
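A short usage sketch (illustrative only, assuming the re-export pandas.core.arrays.period_array): datetime64 input is routed through PeriodArray._from_datetime64 and round-trips back via to_timestamp.

import pandas as pd
from pandas.core.arrays import period_array

stamps = pd.to_datetime(["2020-01-15", "2020-02-20"])
parr = period_array(stamps, freq="M")  # datetime64 path, no string parsing
print(parr.dtype)                      # period[M]
print(parr.to_timestamp(how="start"))  # back to datetime64, anchored at period start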
173,293
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset) def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT: ...
null
173,294
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com class timedelta(SupportsAbs[timedelta]): min: ClassVar[timedelta] max: ClassVar[timedelta] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., *, fold: int = ..., ) -> None: ... else: def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., ) -> None: ... def days(self) -> int: ... def seconds(self) -> int: ... def microseconds(self) -> int: ... def total_seconds(self) -> float: ... def __add__(self, other: timedelta) -> timedelta: ... def __radd__(self, other: timedelta) -> timedelta: ... def __sub__(self, other: timedelta) -> timedelta: ... def __rsub__(self, other: timedelta) -> timedelta: ... def __neg__(self) -> timedelta: ... def __pos__(self) -> timedelta: ... def __abs__(self) -> timedelta: ... def __mul__(self, other: float) -> timedelta: ... def __rmul__(self, other: float) -> timedelta: ... def __floordiv__(self, other: timedelta) -> int: ... def __floordiv__(self, other: int) -> timedelta: ... if sys.version_info >= (3,): def __truediv__(self, other: timedelta) -> float: ... def __truediv__(self, other: float) -> timedelta: ... def __mod__(self, other: timedelta) -> timedelta: ... def __divmod__(self, other: timedelta) -> Tuple[int, timedelta]: ... else: def __div__(self, other: timedelta) -> float: ... def __div__(self, other: float) -> timedelta: ... def __le__(self, other: timedelta) -> bool: ... def __lt__(self, other: timedelta) -> bool: ... def __ge__(self, other: timedelta) -> bool: ... def __gt__(self, other: timedelta) -> bool: ... def __hash__(self) -> int: ... def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: ...
null
173,295
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset) class timedelta(SupportsAbs[timedelta]): min: ClassVar[timedelta] max: ClassVar[timedelta] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., *, fold: int = ..., ) -> None: ... else: def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., ) -> None: ... def days(self) -> int: ... def seconds(self) -> int: ... def microseconds(self) -> int: ... def total_seconds(self) -> float: ... def __add__(self, other: timedelta) -> timedelta: ... def __radd__(self, other: timedelta) -> timedelta: ... def __sub__(self, other: timedelta) -> timedelta: ... def __rsub__(self, other: timedelta) -> timedelta: ... def __neg__(self) -> timedelta: ... def __pos__(self) -> timedelta: ... def __abs__(self) -> timedelta: ... def __mul__(self, other: float) -> timedelta: ... def __rmul__(self, other: float) -> timedelta: ... def __floordiv__(self, other: timedelta) -> int: ... def __floordiv__(self, other: int) -> timedelta: ... if sys.version_info >= (3,): def __truediv__(self, other: timedelta) -> float: ... def __truediv__(self, other: float) -> timedelta: ... def __mod__(self, other: timedelta) -> timedelta: ... def __divmod__(self, other: timedelta) -> Tuple[int, timedelta]: ... else: def __div__(self, other: timedelta) -> float: ... def __div__(self, other: float) -> timedelta: ... def __le__(self, other: timedelta) -> bool: ... def __lt__(self, other: timedelta) -> bool: ... def __ge__(self, other: timedelta) -> bool: ... def __gt__(self, other: timedelta) -> bool: ... def __hash__(self) -> int: ... def is_period_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the Period dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. 
Returns ------- boolean Whether or not the array-like or dtype is of the Period dtype. Examples -------- >>> is_period_dtype(object) False >>> is_period_dtype(PeriodDtype(freq="D")) True >>> is_period_dtype([1, 2, 3]) False >>> is_period_dtype(pd.Period("2017-01-01")) False >>> is_period_dtype(pd.PeriodIndex([], freq="A")) True """ if isinstance(arr_or_dtype, ExtensionDtype): # GH#33400 fastpath for dtype object return arr_or_dtype.type is Period if arr_or_dtype is None: return False return PeriodDtype.is_dtype(arr_or_dtype) def pandas_dtype(dtype) -> DtypeObj: """ Convert input into a pandas only dtype object or a numpy dtype object. Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """ # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show # numpy deprication warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == "O": raise TypeError(f"dtype '{dtype}' not understood") return npdtype The provided code snippet includes necessary dependencies for implementing the `validate_dtype_freq` function. Write a Python function `def validate_dtype_freq( dtype, freq: BaseOffsetT | timedelta | str | None ) -> BaseOffsetT` to solve the following problem: If both a dtype and a freq are available, ensure they match. If only dtype is available, extract the implied freq. Parameters ---------- dtype : dtype freq : DateOffset or None Returns ------- freq : DateOffset Raises ------ ValueError : non-period dtype IncompatibleFrequency : mismatch between dtype and freq Here is the function: def validate_dtype_freq( dtype, freq: BaseOffsetT | timedelta | str | None ) -> BaseOffsetT: """ If both a dtype and a freq are available, ensure they match. If only dtype is available, extract the implied freq. 
Parameters ---------- dtype : dtype freq : DateOffset or None Returns ------- freq : DateOffset Raises ------ ValueError : non-period dtype IncompatibleFrequency : mismatch between dtype and freq """ if freq is not None: # error: Incompatible types in assignment (expression has type # "BaseOffset", variable has type "Union[BaseOffsetT, timedelta, # str, None]") freq = to_offset(freq) # type: ignore[assignment] if dtype is not None: dtype = pandas_dtype(dtype) if not is_period_dtype(dtype): raise ValueError("dtype must be PeriodDtype") if freq is None: freq = dtype.freq elif freq != dtype.freq: raise IncompatibleFrequency("specified freq and dtype are different") # error: Incompatible return value type (got "Union[BaseOffset, Any, None]", # expected "BaseOffset") return freq # type: ignore[return-value]
If both a dtype and a freq are available, ensure they match. If only dtype is available, extract the implied freq. Parameters ---------- dtype : dtype freq : DateOffset or None Returns ------- freq : DateOffset Raises ------ ValueError : non-period dtype IncompatibleFrequency : mismatch between dtype and freq
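A minimal sketch of the three code paths, assuming validate_dtype_freq is imported from pandas.core.arrays.period as defined above:

import pandas as pd
from pandas.core.arrays.period import validate_dtype_freq
from pandas._libs.tslibs.period import IncompatibleFrequency

print(validate_dtype_freq(pd.PeriodDtype("D"), None))  # freq implied by the dtype
print(validate_dtype_freq(None, "M"))                  # freq string normalized via to_offset

try:
    validate_dtype_freq(pd.PeriodDtype("D"), "M")      # dtype/freq mismatch
except IncompatibleFrequency as err:
    print(err)  # "specified freq and dtype are different"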
173,296
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `dt64arr_to_periodarr` function. Write a Python function `def dt64arr_to_periodarr( data, freq, tz=None ) -> tuple[npt.NDArray[np.int64], BaseOffset]` to solve the following problem: Convert an datetime-like array to values Period ordinals. Parameters ---------- data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]] freq : Optional[Union[str, Tick]] Must match the `freq` on the `data` if `data` is a DatetimeIndex or Series. tz : Optional[tzinfo] Returns ------- ordinals : ndarray[int64] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used. Here is the function: def dt64arr_to_periodarr( data, freq, tz=None ) -> tuple[npt.NDArray[np.int64], BaseOffset]: """ Convert an datetime-like array to values Period ordinals. Parameters ---------- data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]] freq : Optional[Union[str, Tick]] Must match the `freq` on the `data` if `data` is a DatetimeIndex or Series. tz : Optional[tzinfo] Returns ------- ordinals : ndarray[int64] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used. """ if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M": raise ValueError(f"Wrong dtype: {data.dtype}") if freq is None: if isinstance(data, ABCIndex): data, freq = data._values, data.freq elif isinstance(data, ABCSeries): data, freq = data._values, data.dt.freq elif isinstance(data, (ABCIndex, ABCSeries)): data = data._values reso = get_unit_from_dtype(data.dtype) freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq
Convert a datetime-like array to Period ordinal values. Parameters ---------- data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64[ns]]] freq : Optional[Union[str, Tick]] Must match the `freq` on the `data` if `data` is a DatetimeIndex or Series. tz : Optional[tzinfo] Returns ------- ordinals : ndarray[int64] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used.
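A small sketch of the plain-ndarray path, assuming the function is imported from pandas.core.arrays.period:

import numpy as np
from pandas.core.arrays.period import dt64arr_to_periodarr

data = np.array(["2020-01-15", "2020-02-20"], dtype="datetime64[ns]")
ordinals, freq = dt64arr_to_periodarr(data, "M")
print(ordinals)  # int64 ordinals, one per input timestamp
print(freq)      # the resolved offset for "M" (month end)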
173,297
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com def _get_ordinal_range(start, end, periods, freq, mult: int = 1): if com.count_not_none(start, end, periods) != 2: raise ValueError( "Of the three parameters: start, end, and periods, " "exactly two must be specified" ) if freq is not None: freq = to_offset(freq) mult = freq.n if start is not None: start = Period(start, freq) if end is not None: end = Period(end, freq) is_start_per = isinstance(start, Period) is_end_per = isinstance(end, Period) if is_start_per and is_end_per and start.freq != end.freq: raise ValueError("start and end must have same freq") if start is NaT or end is NaT: raise ValueError("start and end must not be NaT") if freq is None: if is_start_per: freq = start.freq elif is_end_per: freq = end.freq else: # pragma: no cover raise ValueError("Could not infer freq from start/end") if periods is not None: periods = periods * mult if start is None: data = np.arange( end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64 ) else: data = np.arange( start.ordinal, start.ordinal + periods, mult, dtype=np.int64 ) else: data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) return data, freq
null
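The "exactly two of start, end, periods" rule enforced here is easiest to observe through pd.period_range, which funnels into this helper; a small sketch assuming stock pandas behavior:

import pandas as pd

print(pd.period_range(start="2020-01", periods=3, freq="M"))
print(pd.period_range(start="2020-01", end="2020-03", freq="M"))

try:
    pd.period_range(start="2020-01", end="2020-03", periods=3, freq="M")
except ValueError as err:
    print(err)  # exactly two of start, end, and periods must be specified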
173,298
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, overload, ) import numpy as np from pandas._libs import ( algos as libalgos, lib, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Timedelta, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( Tick, delta_to_tick, ) from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import ( AnyArrayLike, Dtype, NpDtype, npt, ) from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_any_dtype, is_datetime64_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com def _make_field_arrays(*fields) -> list[np.ndarray]: def _range_from_fields( year=None, month=None, quarter=None, day=None, hour=None, minute=None, second=None, freq=None, ) -> tuple[np.ndarray, BaseOffset]: if hour is None: hour = 0 if minute is None: minute = 0 if second is None: second = 0 if day is None: day = 1 ordinals = [] if quarter is not None: if freq is None: freq = to_offset("Q") base = FreqGroup.FR_QTR.value else: freq = to_offset(freq) base = libperiod.freq_to_dtype_code(freq) if base != FreqGroup.FR_QTR.value: raise AssertionError("base must equal FR_QTR") freqstr = freq.freqstr year, quarter = _make_field_arrays(year, quarter) for y, q in zip(year, quarter): y, m = parsing.quarter_to_myear(y, q, freqstr) val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: freq = to_offset(freq) base = libperiod.freq_to_dtype_code(freq) arrays = _make_field_arrays(year, month, day, hour, minute, second) for y, mth, d, h, mn, s in zip(*arrays): ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base)) return np.array(ordinals, dtype=np.int64), freq
null
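A sketch of the quarterly branch, calling the private helper directly (private API; the stubbed _make_field_arrays is assumed to behave as in the real module, broadcasting scalars and validating field lengths):

from pandas.core.arrays.period import _range_from_fields

ordinals, freq = _range_from_fields(year=[2000, 2000, 2001], quarter=[1, 2, 1])
print(ordinals)  # int64 ordinals for 2000Q1, 2000Q2, 2001Q1
print(freq)      # quarterly offset, Q-DEC by default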
173,299
from __future__ import annotations import operator from operator import ( le, lt, ) import textwrap from typing import ( TYPE_CHECKING, Iterator, Literal, Sequence, TypeVar, Union, cast, overload, ) import numpy as np from pandas._config import get_option from pandas._libs import lib from pandas._libs.interval import ( VALID_CLOSED, Interval, IntervalMixin, intervals_to_interval_bounds, ) from pandas._libs.missing import NA from pandas._typing import ( ArrayLike, AxisInt, Dtype, IntervalClosedType, NpDtype, PositionalIndexer, ScalarIndexer, SequenceIndexer, SortKind, TimeArrayLike, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import IntCastingNaNError from pandas.util._decorators import Appender from pandas.core.dtypes.cast import ( LossySetitemError, maybe_upcast_numeric_to_64bit, ) from pandas.core.dtypes.common import ( is_categorical_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, is_interval_dtype, is_list_like, is_object_dtype, is_scalar, is_string_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import IntervalDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCIntervalIndex, ABCPeriodIndex, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, notna, ) from pandas.core.algorithms import ( isin, take, unique, value_counts, ) from pandas.core.arrays.base import ( ExtensionArray, _extension_array_shared_docs, ) from pandas.core.arrays.datetimes import DatetimeArray from pandas.core.arrays.timedeltas import TimedeltaArray import pandas.core.common as com from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import check_array_indexer from pandas.core.ops import ( invalid_comparison, unpack_zerodim_and_defer, ) def dtype(self) -> IntervalDtype: return self._dtype def astype(self, dtype, copy: bool = True): """ Cast to an ExtensionArray or NumPy array with dtype 'dtype'. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. copy : bool, default True Whether to copy the data, even if not necessary. If False, a copy is made only if the old dtype does not match the new dtype. Returns ------- array : ExtensionArray or ndarray ExtensionArray or NumPy ndarray with 'dtype' for its dtype. """ from pandas import Index if dtype is not None: dtype = pandas_dtype(dtype) if is_interval_dtype(dtype): if dtype == self.dtype: return self.copy() if copy else self if is_float_dtype(self.dtype.subtype) and needs_i8_conversion( dtype.subtype ): # This is allowed on the Index.astype but we disallow it here msg = ( f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" ) raise TypeError(msg) # need to cast to different subtype try: # We need to use Index rules for astype to prevent casting # np.nan entries to int subtypes new_left = Index(self._left, copy=False).astype(dtype.subtype) new_right = Index(self._right, copy=False).astype(dtype.subtype) except IntCastingNaNError: # e.g test_subtype_integer raise except (TypeError, ValueError) as err: # e.g. 
test_subtype_integer_errors f8->u8 can be lossy # and raises ValueError msg = ( f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" ) raise TypeError(msg) from err return self._shallow_copy(new_left, new_right) else: try: return super().astype(dtype, copy=copy) except (TypeError, ValueError) as err: msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" raise TypeError(msg) from err def _maybe_convert_platform_interval(values) -> ArrayLike: """ Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array """ if isinstance(values, (list, tuple)) and len(values) == 0: # GH 19016 # empty lists/tuples get object dtype by default, but this is # prohibited for IntervalArray, so coerce to integer instead return np.array([], dtype=np.int64) elif not is_list_like(values) or isinstance(values, ABCDataFrame): # This will raise later, but we avoid passing to maybe_convert_platform return values elif is_categorical_dtype(values): values = np.asarray(values) elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)): # TODO: should we just cast these to list? return values else: values = extract_array(values, extract_numpy=True) if not hasattr(values, "dtype"): values = np.asarray(values) if is_integer_dtype(values) and values.dtype != np.int64: values = values.astype(np.int64) return values ArrayLike = Union["ExtensionArray", np.ndarray] def is_categorical_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the Categorical dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Categorical dtype. Examples -------- >>> from pandas.api.types import is_categorical_dtype >>> from pandas import CategoricalDtype >>> is_categorical_dtype(object) False >>> is_categorical_dtype(CategoricalDtype()) True >>> is_categorical_dtype([1, 2, 3]) False >>> is_categorical_dtype(pd.Categorical([1, 2, 3])) True >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) True """ if isinstance(arr_or_dtype, ExtensionDtype): # GH#33400 fastpath for dtype object return arr_or_dtype.name == "category" if arr_or_dtype is None: return False return CategoricalDtype.is_dtype(arr_or_dtype) def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. 
Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) def array( data: Sequence[object] | AnyArrayLike, dtype: Dtype | None = None, copy: bool = True, ) -> ExtensionArray: """ Create an array. Parameters ---------- data : Sequence of objects The scalars inside `data` should be instances of the scalar type for `dtype`. It's expected that `data` represents a 1-dimensional array of data. When `data` is an Index or Series, the underlying array will be extracted from `data`. dtype : str, np.dtype, or ExtensionDtype, optional The dtype to use for the array. This may be a NumPy dtype or an extension type registered with pandas using :meth:`pandas.api.extensions.register_extension_dtype`. If not specified, there are two possibilities: 1. When `data` is a :class:`Series`, :class:`Index`, or :class:`ExtensionArray`, the `dtype` will be taken from the data. 2. Otherwise, pandas will attempt to infer the `dtype` from the data. Note that when `data` is a NumPy array, ``data.dtype`` is *not* used for inferring the array type. This is because NumPy cannot represent all the types of data that can be held in extension arrays. Currently, pandas will infer an extension dtype for sequences of ============================== ======================================= Scalar Type Array Type ============================== ======================================= :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` :class:`int` :class:`pandas.arrays.IntegerArray` :class:`float` :class:`pandas.arrays.FloatingArray` :class:`str` :class:`pandas.arrays.StringArray` or :class:`pandas.arrays.ArrowStringArray` :class:`bool` :class:`pandas.arrays.BooleanArray` ============================== ======================================= The ExtensionArray created when the scalar type is :class:`str` is determined by ``pd.options.mode.string_storage`` if the dtype is not explicitly given. For all other cases, NumPy's usual inference rules will be used. .. versionchanged:: 1.2.0 Pandas now also infers nullable-floating dtype for float-like input data copy : bool, default True Whether to copy the data, even if not necessary. Depending on the type of `data`, creating the new array may require copying data, even if ``copy=False``. Returns ------- ExtensionArray The newly created array. Raises ------ ValueError When `data` is not 1-dimensional. See Also -------- numpy.array : Construct a NumPy array. Series : Construct a pandas Series. Index : Construct a pandas Index. arrays.PandasArray : ExtensionArray wrapping a NumPy array. 
Series.array : Extract the array stored within a Series. Notes ----- Omitting the `dtype` argument means pandas will attempt to infer the best array type from the values in the data. As new array types are added by pandas and 3rd party libraries, the "best" array type may change. We recommend specifying `dtype` to ensure that 1. the correct array type for the data is returned 2. the returned array type doesn't change as new extension types are added by pandas and third-party libraries Additionally, if the underlying memory representation of the returned array matters, we recommend specifying the `dtype` as a concrete object rather than a string alias or allowing it to be inferred. For example, a future version of pandas or a 3rd-party library may include a dedicated ExtensionArray for string data. In this event, the following would no longer return a :class:`arrays.PandasArray` backed by a NumPy array. >>> pd.array(['a', 'b'], dtype=str) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 This would instead return the new ExtensionArray dedicated for string data. If you really need the new array to be backed by a NumPy array, specify that in the dtype. >>> pd.array(['a', 'b'], dtype=np.dtype("<U1")) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 Finally, Pandas has arrays that mostly overlap with NumPy * :class:`arrays.DatetimeArray` * :class:`arrays.TimedeltaArray` When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray`` rather than a ``PandasArray``. This is for symmetry with the case of timezone-aware data, which NumPy does not natively support. >>> pd.array(['2015', '2016'], dtype='datetime64[ns]') <DatetimeArray> ['2015-01-01 00:00:00', '2016-01-01 00:00:00'] Length: 2, dtype: datetime64[ns] >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]') <TimedeltaArray> ['0 days 01:00:00', '0 days 02:00:00'] Length: 2, dtype: timedelta64[ns] Examples -------- If a dtype is not specified, pandas will infer the best dtype from the values. See the description of `dtype` for the types pandas infers for. >>> pd.array([1, 2]) <IntegerArray> [1, 2] Length: 2, dtype: Int64 >>> pd.array([1, 2, np.nan]) <IntegerArray> [1, 2, <NA>] Length: 3, dtype: Int64 >>> pd.array([1.1, 2.2]) <FloatingArray> [1.1, 2.2] Length: 2, dtype: Float64 >>> pd.array(["a", None, "c"]) <StringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> with pd.option_context("string_storage", "pyarrow"): ... arr = pd.array(["a", None, "c"]) ... >>> arr <ArrowStringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")]) <PeriodArray> ['2000-01-01', '2000-01-01'] Length: 2, dtype: period[D] You can use the string alias for `dtype` >>> pd.array(['a', 'b', 'a'], dtype='category') ['a', 'b', 'a'] Categories (2, object): ['a', 'b'] Or specify the actual dtype >>> pd.array(['a', 'b', 'a'], ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True)) ['a', 'b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] If pandas does not infer a dedicated extension type a :class:`arrays.PandasArray` is returned. >>> pd.array([1 + 1j, 3 + 2j]) <PandasArray> [(1+1j), (3+2j)] Length: 2, dtype: complex128 As mentioned in the "Notes" section, new extension types may be added in the future (by pandas or 3rd party libraries), causing the return value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype` as a NumPy dtype if you need to ensure there's no future change in behavior. 
>>> pd.array([1, 2], dtype=np.dtype("int32")) <PandasArray> [1, 2] Length: 2, dtype: int32 `data` must be 1-dimensional. A ValueError is raised when the input has the wrong dimensionality. >>> pd.array(1) Traceback (most recent call last): ... ValueError: Cannot pass scalar '1' to 'pandas.array'. """ from pandas.core.arrays import ( BooleanArray, DatetimeArray, ExtensionArray, FloatingArray, IntegerArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.string_ import StringDtype if lib.is_scalar(data): msg = f"Cannot pass scalar '{data}' to 'pandas.array'." raise ValueError(msg) elif isinstance(data, ABCDataFrame): raise TypeError("Cannot pass DataFrame to 'pandas.array'") if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)): # Note: we exclude np.ndarray here, will do type inference on it dtype = data.dtype data = extract_array(data, extract_numpy=True) # this returns None for not-found dtypes. if isinstance(dtype, str): dtype = registry.find(dtype) or dtype if isinstance(data, ExtensionArray) and ( dtype is None or is_dtype_equal(dtype, data.dtype) ): # e.g. TimedeltaArray[s], avoid casting to PandasArray if copy: return data.copy() return data if is_extension_array_dtype(dtype): cls = cast(ExtensionDtype, dtype).construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) if dtype is None: inferred_dtype = lib.infer_dtype(data, skipna=True) if inferred_dtype == "period": period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data) return PeriodArray._from_sequence(period_data, copy=copy) elif inferred_dtype == "interval": return IntervalArray(data, copy=copy) elif inferred_dtype.startswith("datetime"): # datetime, datetime64 try: return DatetimeArray._from_sequence(data, copy=copy) except ValueError: # Mixture of timezones, fall back to PandasArray pass elif inferred_dtype.startswith("timedelta"): # timedelta, timedelta64 return TimedeltaArray._from_sequence(data, copy=copy) elif inferred_dtype == "string": # StringArray/ArrowStringArray depending on pd.options.mode.string_storage return StringDtype().construct_array_type()._from_sequence(data, copy=copy) elif inferred_dtype == "integer": return IntegerArray._from_sequence(data, copy=copy) elif ( inferred_dtype in ("floating", "mixed-integer-float") and getattr(data, "dtype", None) != np.float16 ): # GH#44715 Exclude np.float16 bc FloatingArray does not support it; # we will fall back to PandasArray. return FloatingArray._from_sequence(data, copy=copy) elif inferred_dtype == "boolean": return BooleanArray._from_sequence(data, copy=copy) # Pandas overrides NumPy for # 1. datetime64[ns] # 2. timedelta64[ns] # so that a DatetimeArray is returned. if is_datetime64_ns_dtype(dtype): return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) elif is_timedelta64_ns_dtype(dtype): return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) return PandasArray._from_sequence(data, dtype=dtype, copy=copy) The provided code snippet includes necessary dependencies for implementing the `_maybe_convert_platform_interval` function. Write a Python function `def _maybe_convert_platform_interval(values) -> ArrayLike` to solve the following problem: Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. 
For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array Here is the function: def _maybe_convert_platform_interval(values) -> ArrayLike: """ Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array """ if isinstance(values, (list, tuple)) and len(values) == 0: # GH 19016 # empty lists/tuples get object dtype by default, but this is # prohibited for IntervalArray, so coerce to integer instead return np.array([], dtype=np.int64) elif not is_list_like(values) or isinstance(values, ABCDataFrame): # This will raise later, but we avoid passing to maybe_convert_platform return values elif is_categorical_dtype(values): values = np.asarray(values) elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)): # TODO: should we just cast these to list? return values else: values = extract_array(values, extract_numpy=True) if not hasattr(values, "dtype"): values = np.asarray(values) if is_integer_dtype(values) and values.dtype != np.int64: values = values.astype(np.int64) return values
Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array
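A brief usage sketch of the coercion behaviour described above. It assumes the helper can be imported from pandas.core.arrays.interval, a private pandas module whose layout may change between releases; treat the import path as an assumption rather than a stable API.

import numpy as np

# Assumption: private pandas module layout (~1.5/2.x); not a public import path.
from pandas.core.arrays.interval import _maybe_convert_platform_interval

# Empty lists/tuples are coerced to an empty int64 array instead of object dtype (GH 19016).
empty = _maybe_convert_platform_interval([])
assert empty.dtype == np.dtype("int64") and empty.size == 0

# Narrow integer dtypes are widened to int64 so the result is compatible with IntervalArray.
widened = _maybe_convert_platform_interval(np.array([1, 2, 3], dtype=np.int32))
assert widened.dtype == np.dtype("int64")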
173,300
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslibs, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Tick, Timedelta, astype_overflowsafe, get_supported_reso, get_unit_from_dtype, iNaT, is_supported_unit, npy_unit_to_abbrev, periods_per_second, to_offset, ) from pandas._libs.tslibs.conversion import precision_from_unit from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import ( array_to_timedelta64, floordiv_object_array, ints_to_pytimedelta, parse_timedelta_unit, truediv_object_array, ) from pandas._typing import ( AxisInt, DateTimeErrorChoices, DtypeObj, NpDtype, npt, ) from pandas.compat.numpy import function as nv from pandas.util._validators import validate_endpoints from pandas.core.dtypes.common import ( TD64NS_DTYPE, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.missing import isna from pandas.core import nanops from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.core.ops import roperator from pandas.core.ops.common import unpack_zerodim_and_defer def _field_accessor(name: str, alias: str, docstring: str): def f(self) -> np.ndarray: values = self.asi8 result = get_timedelta_field(values, alias, reso=self._creso) if self._hasna: result = self._maybe_mask_results( result, fill_value=None, convert="float64" ) return result f.__name__ = name f.__doc__ = f"\n{docstring}\n" return property(f)
null
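Since the docstring field above is null, here is a small self-contained sketch of the property-factory pattern that _field_accessor implements. All names below (make_field_accessor, ToyDurations) are illustrative stand-ins, not pandas internals.

import numpy as np

def make_field_accessor(name: str, extractor):
    # Build a read-only property that applies `extractor` to the stored integers.
    def f(self) -> np.ndarray:
        return extractor(self._values)
    f.__name__ = name
    f.__doc__ = f"Return the '{name}' field as an ndarray."
    return property(f)

class ToyDurations:
    def __init__(self, seconds) -> None:
        self._values = np.asarray(seconds, dtype=np.int64)

    # Expose derived fields as read-only properties, mirroring the factory style above.
    days = make_field_accessor("days", lambda v: v // 86_400)
    seconds = make_field_accessor("seconds", lambda v: v % 86_400)

d = ToyDurations([90_000, 172_800])
print(d.days)     # [1 2]
print(d.seconds)  # [3600    0]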
173,301
from __future__ import annotations from datetime import timedelta import operator from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslibs, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Tick, Timedelta, astype_overflowsafe, get_supported_reso, get_unit_from_dtype, iNaT, is_supported_unit, npy_unit_to_abbrev, periods_per_second, to_offset, ) from pandas._libs.tslibs.conversion import precision_from_unit from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import ( array_to_timedelta64, floordiv_object_array, ints_to_pytimedelta, parse_timedelta_unit, truediv_object_array, ) from pandas._typing import ( AxisInt, DateTimeErrorChoices, DtypeObj, NpDtype, npt, ) from pandas.compat.numpy import function as nv from pandas.util._validators import validate_endpoints from pandas.core.dtypes.common import ( TD64NS_DTYPE, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.missing import isna from pandas.core import nanops from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.core.ops import roperator from pandas.core.ops.common import unpack_zerodim_and_defer DtypeObj = Union[np.dtype, "ExtensionDtype"] def is_dtype_equal(source, target) -> bool: """ Check if two dtypes are equal. Parameters ---------- source : The first dtype to compare target : The second dtype to compare Returns ------- boolean Whether or not the two dtypes are equal. Examples -------- >>> is_dtype_equal(int, float) False >>> is_dtype_equal("int", int) True >>> is_dtype_equal(object, "category") False >>> is_dtype_equal(CategoricalDtype(), "category") True >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") False """ if isinstance(target, str): if not isinstance(source, str): # GH#38516 ensure we get the same behavior from # is_dtype_equal(CDT, "category") and CDT == "category" try: src = get_dtype(source) if isinstance(src, ExtensionDtype): return src == target except (TypeError, AttributeError, ImportError): return False elif isinstance(source, str): return is_dtype_equal(target, source) try: source = get_dtype(source) target = get_dtype(target) return source == target except (TypeError, AttributeError, ImportError): # invalid comparison # object == category will hit this return False def pandas_dtype(dtype) -> DtypeObj: """ Convert input into a pandas only dtype object or a numpy dtype object. 
Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """ # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show # numpy deprication warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == "O": raise TypeError(f"dtype '{dtype}' not understood") return npdtype def _validate_td64_dtype(dtype) -> DtypeObj: dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, np.dtype("timedelta64")): # no precision disallowed GH#24806 msg = ( "Passing in 'timedelta' dtype with no precision is not allowed. " "Please pass in 'timedelta64[ns]' instead." ) raise ValueError(msg) if ( not isinstance(dtype, np.dtype) or dtype.kind != "m" or not is_supported_unit(get_unit_from_dtype(dtype)) ): raise ValueError(f"dtype {dtype} cannot be converted to timedelta64[ns]") return dtype
null
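Because the docstring above is null, a hedged usage sketch of the validation rules encoded in _validate_td64_dtype may help; the import path is a pandas-internal detail and is assumed to match the ~1.5/2.x source shown in the prompt.

import numpy as np

# Assumption: private pandas module layout; not a public import path.
from pandas.core.arrays.timedeltas import _validate_td64_dtype

# A unit-qualified timedelta dtype passes validation and is returned unchanged.
print(_validate_td64_dtype("timedelta64[ns]"))  # timedelta64[ns]

# A bare 'timedelta64' with no precision is rejected (GH#24806).
try:
    _validate_td64_dtype(np.dtype("timedelta64"))
except ValueError as err:
    print(err)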
173,302
from __future__ import annotations import re from typing import ( Callable, Union, ) import numpy as np from pandas._libs import ( lib, missing as libmissing, ) from pandas._typing import ( Dtype, Scalar, npt, ) from pandas.compat import pa_version_under7p0 from pandas.core.dtypes.common import ( is_bool_dtype, is_dtype_equal, is_integer_dtype, is_object_dtype, is_scalar, is_string_dtype, pandas_dtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays.arrow import ArrowExtensionArray from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import Int64Dtype from pandas.core.arrays.numeric import NumericDtype from pandas.core.arrays.string_ import ( BaseStringArray, StringDtype, ) from pandas.core.strings.object_array import ObjectStringArrayMixin if not pa_version_under7p0: import pyarrow as pa import pyarrow.compute as pc from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning def _chk_pyarrow_available() -> None: if pa_version_under7p0: msg = "pyarrow>=7.0.0 is required for PyArrow backed ArrowExtensionArray." raise ImportError(msg)
null
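The entry above shows pandas' guard for an optional dependency. A minimal, self-contained variant of the same pattern, checking only that pyarrow is importable rather than the minimum version that pa_version_under7p0 encodes, might look like this (check_pyarrow_available is an illustrative name):

from importlib.util import find_spec

def check_pyarrow_available() -> None:
    # Raise early with an actionable message when the optional dependency is missing.
    if find_spec("pyarrow") is None:
        raise ImportError(
            "pyarrow>=7.0.0 is required for PyArrow backed ArrowExtensionArray."
        )

check_pyarrow_available()  # no-op when pyarrow is installed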
173,303
from __future__ import annotations from functools import wraps from typing import ( TYPE_CHECKING, Any, Literal, Sequence, TypeVar, cast, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.arrays import NDArrayBacked from pandas._typing import ( ArrayLike, AxisInt, Dtype, F, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, SequenceIndexer, Shape, TakeIndexer, npt, type_t, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import ( validate_bool_kwarg, validate_fillna_kwargs, validate_insert_loc, ) from pandas.core.dtypes.common import ( is_dtype_equal, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.missing import array_equivalent from pandas.core import missing from pandas.core.algorithms import ( take, unique, value_counts, ) from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.array_algos.transforms import shift from pandas.core.arrays.base import ExtensionArray from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer from pandas.core.sorting import nargminmax def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ... def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... F = TypeVar("F", bound=FuncType) The provided code snippet includes necessary dependencies for implementing the `ravel_compat` function. Write a Python function `def ravel_compat(meth: F) -> F` to solve the following problem: Decorator to ravel a 2D array before passing it to a cython operation, then reshape the result to our own shape. Here is the function: def ravel_compat(meth: F) -> F: """ Decorator to ravel a 2D array before passing it to a cython operation, then reshape the result to our own shape. """ @wraps(meth) def method(self, *args, **kwargs): if self.ndim == 1: return meth(self, *args, **kwargs) flags = self._ndarray.flags flat = self.ravel("K") result = meth(flat, *args, **kwargs) order = "F" if flags.f_contiguous else "C" return result.reshape(self.shape, order=order) return cast(F, method)
Decorator to ravel a 2D array before passing it to a cython operation, then reshape the result to our own shape.
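A self-contained sketch of the ravel-then-reshape idea: the pandas decorator above wraps array methods and consults self._ndarray.flags, whereas this illustrative variant (ravel_compat_fn, an assumed name) wraps a free function that only understands 1-D ndarrays.

import numpy as np
from functools import wraps

def ravel_compat_fn(func):
    @wraps(func)
    def wrapper(arr: np.ndarray, *args, **kwargs):
        if arr.ndim == 1:
            return func(arr, *args, **kwargs)
        # Flatten in memory order, call the 1-D-only function, then restore the shape.
        order = "F" if arr.flags.f_contiguous else "C"
        flat = func(arr.ravel("K"), *args, **kwargs)
        return flat.reshape(arr.shape, order=order)
    return wrapper

@ravel_compat_fn
def double(values: np.ndarray) -> np.ndarray:
    return values * 2  # stand-in for an operation that only supports 1-D input

print(double(np.arange(6).reshape(2, 3)).shape)  # (2, 3)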
173,304
from __future__ import annotations from datetime import ( datetime, time, timedelta, tzinfo, ) from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_reso, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_unit, is_unitless, normalize_i8_timestamps, npy_unit_to_abbrev, timezones, to_offset, tz_convert_from_utc, tzconversion, ) from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas._typing import ( DateTimeErrorChoices, IntervalClosedType, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_object_dtype, is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( Day, Tick, ) def _field_accessor(name: str, field: str, docstring=None): def f(self): values = self._local_timestamps() if field in self._bool_ops: result: np.ndarray if field.endswith(("start", "end")): freq = self.freq month_kw = 12 if freq: kwds = freq.kwds month_kw = kwds.get("startingMonth", kwds.get("month", 12)) result = fields.get_start_end_field( values, field, self.freqstr, month_kw, reso=self._creso ) else: result = fields.get_date_field(values, field, reso=self._creso) # these return a boolean by-definition return result if field in self._object_ops: result = fields.get_date_name_field(values, field, reso=self._creso) result = self._maybe_mask_results(result, fill_value=None) else: result = fields.get_date_field(values, field, reso=self._creso) result = self._maybe_mask_results( result, fill_value=None, convert="float64" ) return result f.__name__ = name f.__doc__ = docstring return property(f)
null
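The docstring for this entry is null. Viewed from the public API, the properties generated by the accessor factory above surface as DatetimeIndex (and Series.dt) fields; a small illustration using only public pandas calls, with the exact output left unstated:

import pandas as pd

idx = pd.date_range("2024-01-30", periods=3)  # three consecutive calendar days
print(idx.month)          # integer field (fields.get_date_field under the hood)
print(idx.is_month_end)   # boolean start/end field (fields.get_start_end_field)
print(idx.day_name())     # day-name field (fields.get_date_name_field)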
173,305
from __future__ import annotations from datetime import ( datetime, time, timedelta, tzinfo, ) from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_reso, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_unit, is_unitless, normalize_i8_timestamps, npy_unit_to_abbrev, timezones, to_offset, tz_convert_from_utc, tzconversion, ) from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas._typing import ( DateTimeErrorChoices, IntervalClosedType, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_object_dtype, is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( Day, Tick, ) class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): """ Pandas ExtensionArray for tz-naive or tz-aware datetime data. .. warning:: DatetimeArray is currently experimental, and its API may change without warning. In particular, :attr:`DatetimeArray.dtype` is expected to change to always be an instance of an ``ExtensionDtype`` subclass. Parameters ---------- values : Series, Index, DatetimeArray, ndarray The datetime data. For DatetimeArray `values` (or a Series or Index boxing one), `dtype` and `freq` will be extracted from `values`. dtype : numpy.dtype or DatetimeTZDtype Note that the only NumPy dtype allowed is 'datetime64[ns]'. freq : str or Offset, optional The frequency. copy : bool, default False Whether to copy the underlying array of values. 
Attributes ---------- None Methods ------- None """ _typ = "datetimearray" _internal_fill_value = np.datetime64("NaT", "ns") _recognized_scalars = (datetime, np.datetime64) _is_recognized_dtype = is_datetime64_any_dtype _infer_matches = ("datetime", "datetime64", "date") def _scalar_type(self) -> type[Timestamp]: return Timestamp # define my properties & methods for delegation _bool_ops: list[str] = [ "is_month_start", "is_month_end", "is_quarter_start", "is_quarter_end", "is_year_start", "is_year_end", "is_leap_year", ] _object_ops: list[str] = ["freq", "tz"] _field_ops: list[str] = [ "year", "month", "day", "hour", "minute", "second", "weekday", "dayofweek", "day_of_week", "dayofyear", "day_of_year", "quarter", "days_in_month", "daysinmonth", "microsecond", "nanosecond", ] _other_ops: list[str] = ["date", "time", "timetz"] _datetimelike_ops: list[str] = ( _field_ops + _object_ops + _bool_ops + _other_ops + ["unit"] ) _datetimelike_methods: list[str] = [ "to_period", "tz_localize", "tz_convert", "normalize", "strftime", "round", "floor", "ceil", "month_name", "day_name", "as_unit", ] # ndim is inherited from ExtensionArray, must exist to ensure # Timestamp.__richcmp__(DateTimeArray) operates pointwise # ensure that operations with numpy arrays defer to our implementation __array_priority__ = 1000 # ----------------------------------------------------------------- # Constructors _dtype: np.dtype | DatetimeTZDtype _freq: BaseOffset | None = None _default_dtype = DT64NS_DTYPE # used in TimeLikeOps.__init__ def _validate_dtype(cls, values, dtype): # used in TimeLikeOps.__init__ _validate_dt64_dtype(values.dtype) dtype = _validate_dt64_dtype(dtype) return dtype # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" def _simple_new( # type: ignore[override] cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE, ) -> DatetimeArray: assert isinstance(values, np.ndarray) assert dtype.kind == "M" if isinstance(dtype, np.dtype): assert dtype == values.dtype assert not is_unitless(dtype) else: # DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC], # then values.dtype should be M8[us]. assert dtype._creso == get_unit_from_dtype(values.dtype) result = super()._simple_new(values, dtype) result._freq = freq return result def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False): return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy) def _from_sequence_not_strict( cls, data, *, dtype=None, copy: bool = False, tz=lib.no_default, freq: str | BaseOffset | lib.NoDefault | None = lib.no_default, dayfirst: bool = False, yearfirst: bool = False, ambiguous: TimeAmbiguous = "raise", ): """ A non-strict version of _from_sequence, called from DatetimeIndex.__new__. """ explicit_none = freq is None freq = freq if freq is not lib.no_default else None freq, freq_infer = dtl.maybe_infer_freq(freq) # if the user either explicitly passes tz=None or a tz-naive dtype, we # disallows inferring a tz. 
explicit_tz_none = tz is None if tz is lib.no_default: tz = None else: tz = timezones.maybe_get_tz(tz) dtype = _validate_dt64_dtype(dtype) # if dtype has an embedded tz, capture it tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none) unit = None if dtype is not None: if isinstance(dtype, np.dtype): unit = np.datetime_data(dtype)[0] else: # DatetimeTZDtype unit = dtype.unit subarr, tz, inferred_freq = _sequence_to_dt64ns( data, copy=copy, tz=tz, dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous, out_unit=unit, ) # We have to call this again after possibly inferring a tz above _validate_tz_from_dtype(dtype, tz, explicit_tz_none) if tz is not None and explicit_tz_none: raise ValueError( "Passed data is timezone-aware, incompatible with 'tz=None'. " "Use obj.tz_localize(None) instead." ) freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer) if explicit_none: freq = None data_unit = np.datetime_data(subarr.dtype)[0] data_dtype = tz_to_dtype(tz, data_unit) result = cls._simple_new(subarr, freq=freq, dtype=data_dtype) if unit is not None and unit != result.unit: # If unit was specified in user-passed dtype, cast to it here result = result.as_unit(unit) if inferred_freq is None and freq is not None: # this condition precludes `freq_infer` cls._validate_frequency(result, freq, ambiguous=ambiguous) elif freq_infer: # Set _freq directly to bypass duplicative _validate_frequency # check. result._freq = to_offset(result.inferred_freq) return result # error: Signature of "_generate_range" incompatible with supertype # "DatetimeLikeArrayMixin" def _generate_range( # type: ignore[override] cls, start, end, periods, freq, tz=None, normalize: bool = False, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", inclusive: IntervalClosedType = "both", *, unit: str | None = None, ) -> DatetimeArray: periods = dtl.validate_periods(periods) if freq is None and any(x is None for x in [periods, start, end]): raise ValueError("Must provide freq argument if no data is supplied") if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( "Of the four parameters: start, end, periods, " "and freq, exactly three must be specified" ) freq = to_offset(freq) if start is not None: start = Timestamp(start) if end is not None: end = Timestamp(end) if start is NaT or end is NaT: raise ValueError("Neither `start` nor `end` can be NaT") if unit is not None: if unit not in ["s", "ms", "us", "ns"]: raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'") else: unit = "ns" if start is not None and unit is not None: start = start.as_unit(unit, round_ok=False) if end is not None and unit is not None: end = end.as_unit(unit, round_ok=False) left_inclusive, right_inclusive = validate_inclusive(inclusive) start, end = _maybe_normalize_endpoints(start, end, normalize) tz = _infer_tz_from_endpoints(start, end, tz) if tz is not None: # Localize the start and end arguments start_tz = None if start is None else start.tz end_tz = None if end is None else end.tz start = _maybe_localize_point( start, start_tz, start, freq, tz, ambiguous, nonexistent ) end = _maybe_localize_point( end, end_tz, end, freq, tz, ambiguous, nonexistent ) if freq is not None: # We break Day arithmetic (fixed 24 hour) here and opt for # Day to mean calendar day (23/24/25 hour). 
Therefore, strip # tz info from start and day to avoid DST arithmetic if isinstance(freq, Day): if start is not None: start = start.tz_localize(None) if end is not None: end = end.tz_localize(None) if isinstance(freq, Tick): i8values = generate_regular_range(start, end, periods, freq, unit=unit) else: xdr = _generate_range( start=start, end=end, periods=periods, offset=freq, unit=unit ) i8values = np.array([x._value for x in xdr], dtype=np.int64) endpoint_tz = start.tz if start is not None else end.tz if tz is not None and endpoint_tz is None: if not timezones.is_utc(tz): # short-circuit tz_localize_to_utc which would make # an unnecessary copy with UTC but be a no-op. creso = abbrev_to_npy_unit(unit) i8values = tzconversion.tz_localize_to_utc( i8values, tz, ambiguous=ambiguous, nonexistent=nonexistent, creso=creso, ) # i8values is localized datetime64 array -> have to convert # start/end as well to compare if start is not None: start = start.tz_localize(tz, ambiguous, nonexistent) if end is not None: end = end.tz_localize(tz, ambiguous, nonexistent) else: # Create a linearly spaced date_range in local time # Nanosecond-granularity timestamps aren't always correctly # representable with doubles, so we limit the range that we # pass to np.linspace as much as possible i8values = ( np.linspace(0, end._value - start._value, periods, dtype="int64") + start._value ) if i8values.dtype != "i8": # 2022-01-09 I (brock) am not sure if it is possible for this # to overflow and cast to e.g. f8, but if it does we need to cast i8values = i8values.astype("i8") if start == end: if not left_inclusive and not right_inclusive: i8values = i8values[1:-1] else: start_i8 = Timestamp(start)._value end_i8 = Timestamp(end)._value if not left_inclusive or not right_inclusive: if not left_inclusive and len(i8values) and i8values[0] == start_i8: i8values = i8values[1:] if not right_inclusive and len(i8values) and i8values[-1] == end_i8: i8values = i8values[:-1] dt64_values = i8values.view(f"datetime64[{unit}]") dtype = tz_to_dtype(tz, unit=unit) return cls._simple_new(dt64_values, freq=freq, dtype=dtype) # ----------------------------------------------------------------- # DatetimeLike Interface def _unbox_scalar(self, value) -> np.datetime64: if not isinstance(value, self._scalar_type) and value is not NaT: raise ValueError("'value' should be a Timestamp.") self._check_compatible_with(value) if value is NaT: return np.datetime64(value._value, self.unit) else: return value.as_unit(self.unit).asm8 def _scalar_from_string(self, value) -> Timestamp | NaTType: return Timestamp(value, tz=self.tz) def _check_compatible_with(self, other) -> None: if other is NaT: return self._assert_tzawareness_compat(other) # ----------------------------------------------------------------- # Descriptive Properties def _box_func(self, x: np.datetime64) -> Timestamp | NaTType: # GH#42228 value = x.view("i8") ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz) return ts # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype" # incompatible with return type "ExtensionDtype" in supertype # "ExtensionArray" def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override] """ The dtype for the DatetimeArray. .. warning:: A future version of pandas will change dtype to never be a ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will always be an instance of an ``ExtensionDtype`` subclass. 
Returns ------- numpy.dtype or DatetimeTZDtype If the values are tz-naive, then ``np.dtype('datetime64[ns]')`` is returned. If the values are tz-aware, then the ``DatetimeTZDtype`` is returned. """ return self._dtype def tz(self) -> tzinfo | None: """ Return the timezone. Returns ------- datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None Returns None when the array is tz-naive. """ # GH 18595 return getattr(self.dtype, "tz", None) def tz(self, value): # GH 3746: Prevent localizing or converting the index by setting tz raise AttributeError( "Cannot directly set timezone. Use tz_localize() " "or tz_convert() as appropriate" ) def tzinfo(self) -> tzinfo | None: """ Alias for tz attribute """ return self.tz def is_normalized(self) -> bool: """ Returns True if all of the dates are at midnight ("no time") """ return is_date_array_normalized(self.asi8, self.tz, reso=self._creso) def _resolution_obj(self) -> Resolution: return get_resolution(self.asi8, self.tz, reso=self._creso) # ---------------------------------------------------------------- # Array-Like / EA-Interface Methods def __array__(self, dtype=None) -> np.ndarray: if dtype is None and self.tz: # The default for tz-aware is object, to preserve tz info dtype = object return super().__array__(dtype=dtype) def __iter__(self) -> Iterator: """ Return an iterator over the boxed values Yields ------ tstamp : Timestamp """ if self.ndim > 1: for i in range(len(self)): yield self[i] else: # convert in chunks of 10k for efficiency data = self.asi8 length = len(self) chunksize = 10000 chunks = (length // chunksize) + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, length) converted = ints_to_pydatetime( data[start_i:end_i], tz=self.tz, box="timestamp", reso=self._creso, ) yield from converted def astype(self, dtype, copy: bool = True): # We handle # --> datetime # --> period # DatetimeLikeArrayMixin Super handles the rest. dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, self.dtype): if copy: return self.copy() return self elif isinstance(dtype, ExtensionDtype): if not isinstance(dtype, DatetimeTZDtype): # e.g. Sparse[datetime64[ns]] return super().astype(dtype, copy=copy) elif self.tz is None: # pre-2.0 this did self.tz_localize(dtype.tz), which did not match # the Series behavior which did # values.tz_localize("UTC").tz_convert(dtype.tz) raise TypeError( "Cannot use .astype to convert from timezone-naive dtype to " "timezone-aware dtype. Use obj.tz_localize instead or " "series.dt.tz_localize instead" ) else: # tzaware unit conversion e.g. datetime64[s, UTC] np_dtype = np.dtype(dtype.str) res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy) return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq) elif ( self.tz is None and is_datetime64_dtype(dtype) and not is_unitless(dtype) and is_supported_unit(get_unit_from_dtype(dtype)) ): # unit conversion e.g. datetime64[s] res_values = astype_overflowsafe(self._ndarray, dtype, copy=True) return type(self)._simple_new(res_values, dtype=res_values.dtype) # TODO: preserve freq? elif self.tz is not None and is_datetime64_dtype(dtype): # pre-2.0 behavior for DTA/DTI was # values.tz_convert("UTC").tz_localize(None), which did not match # the Series behavior raise TypeError( "Cannot use .astype to convert from timezone-aware dtype to " "timezone-naive dtype. Use obj.tz_localize(None) or " "obj.tz_convert('UTC').tz_localize(None) instead." 
) elif ( self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype and is_unitless(dtype) ): raise TypeError( "Casting to unit-less dtype 'datetime64' is not supported. " "Pass e.g. 'datetime64[ns]' instead." ) elif is_period_dtype(dtype): return self.to_period(freq=dtype.freq) return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy) # ----------------------------------------------------------------- # Rendering Methods def _format_native_types( self, *, na_rep: str | float = "NaT", date_format=None, **kwargs ) -> npt.NDArray[np.object_]: from pandas.io.formats.format import get_format_datetime64_from_values fmt = get_format_datetime64_from_values(self, date_format) return tslib.format_array_from_datetime( self.asi8, tz=self.tz, format=fmt, na_rep=na_rep, reso=self._creso ) # ----------------------------------------------------------------- # Comparison Methods def _has_same_tz(self, other) -> bool: # vzone shouldn't be None if value is non-datetime like if isinstance(other, np.datetime64): # convert to Timestamp as np.datetime64 doesn't have tz attr other = Timestamp(other) if not hasattr(other, "tzinfo"): return False other_tz = other.tzinfo return timezones.tz_compare(self.tzinfo, other_tz) def _assert_tzawareness_compat(self, other) -> None: # adapted from _Timestamp._assert_tzawareness_compat other_tz = getattr(other, "tzinfo", None) other_dtype = getattr(other, "dtype", None) if is_datetime64tz_dtype(other_dtype): # Get tzinfo from Series dtype other_tz = other.dtype.tz if other is NaT: # pd.NaT quacks both aware and naive pass elif self.tz is None: if other_tz is not None: raise TypeError( "Cannot compare tz-naive and tz-aware datetime-like objects." ) elif other_tz is None: raise TypeError( "Cannot compare tz-naive and tz-aware datetime-like objects" ) # ----------------------------------------------------------------- # Arithmetic Methods def _add_offset(self, offset) -> DatetimeArray: assert not isinstance(offset, Tick) if self.tz is not None: values = self.tz_localize(None) else: values = self try: result = offset._apply_array(values).view(values.dtype) except NotImplementedError: warnings.warn( "Non-vectorized DateOffset being applied to Series or DatetimeIndex.", PerformanceWarning, stacklevel=find_stack_level(), ) result = self.astype("O") + offset result = type(self)._from_sequence(result).as_unit(self.unit) if not len(self): # GH#30336 _from_sequence won't be able to infer self.tz return result.tz_localize(self.tz) else: result = DatetimeArray._simple_new(result, dtype=result.dtype) if self.tz is not None: result = result.tz_localize(self.tz) return result # ----------------------------------------------------------------- # Timezone Conversion and Localization Methods def _local_timestamps(self) -> npt.NDArray[np.int64]: """ Convert to an i8 (unix-like nanosecond timestamp) representation while keeping the local timezone and not using UTC. This is used to calculate time-of-day information as if the timestamps were timezone-naive. """ if self.tz is None or timezones.is_utc(self.tz): # Avoid the copy that would be made in tzconversion return self.asi8 return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) def tz_convert(self, tz) -> DatetimeArray: """ Convert tz-aware Datetime Array/Index from one time zone to another. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. 
A `tz` of None will convert to UTC and remove the timezone information. Returns ------- Array or Index Raises ------ TypeError If Datetime Array/Index is tz-naive. See Also -------- DatetimeIndex.tz : A timezone that has a variable offset from UTC. DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.date_range(start='2014-08-01 09:00', ... freq='H', periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert('US/Central') DatetimeIndex(['2014-08-01 02:00:00-05:00', '2014-08-01 03:00:00-05:00', '2014-08-01 04:00:00-05:00'], dtype='datetime64[ns, US/Central]', freq='H') With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H', ... periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[ns]', freq='H') """ tz = timezones.maybe_get_tz(tz) if self.tz is None: # tz naive, use tz_localize raise TypeError( "Cannot convert tz-naive timestamps, use tz_localize to localize" ) # No conversion since timestamps are all UTC to begin with dtype = tz_to_dtype(tz, unit=self.unit) return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq) def tz_localize( self, tz, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> DatetimeArray: """ Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index. This method takes a time zone (tz) naive Datetime Array/Index object and makes this time zone aware. It does not move the time to another time zone. This method can also be used to do the inverse -- to create a time zone unaware object from an aware object. To that end, pass `tz=None`. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None Time zone to convert timestamps to. Passing ``None`` will remove the time zone information preserving local time. ambiguous : 'infer', 'NaT', bool array, default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False signifies a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \ default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. 
- 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- Same type as self Array/Index converted to the specified time zone. Raises ------ TypeError If the Datetime Array/Index is tz-aware and tz is not None. See Also -------- DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from one time zone to another. Examples -------- >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3) >>> tz_naive DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq='D') Localize DatetimeIndex in US/Eastern time zone: >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern') >>> tz_aware DatetimeIndex(['2018-03-01 09:00:00-05:00', '2018-03-02 09:00:00-05:00', '2018-03-03 09:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq=None) With the ``tz=None``, we can remove the time zone information while keeping the local time (not converted to UTC): >>> tz_aware.tz_localize(None) DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq=None) Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.dt.tz_localize('CET', ambiguous='infer') 0 2018-10-28 01:30:00+02:00 1 2018-10-28 02:00:00+02:00 2 2018-10-28 02:30:00+02:00 3 2018-10-28 02:00:00+01:00 4 2018-10-28 02:30:00+01:00 5 2018-10-28 03:00:00+01:00 6 2018-10-28 03:30:00+01:00 dtype: datetime64[ns, CET] In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False])) 0 2018-10-28 01:20:00+02:00 1 2018-10-28 02:36:00+02:00 2 2018-10-28 03:46:00+01:00 dtype: datetime64[ns, CET] If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, Europe/Warsaw] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 0 2015-03-29 01:59:59.999999999+01:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, Europe/Warsaw] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 0 2015-03-29 03:30:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, Europe/Warsaw] """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) if self.tz is not None: if tz is None: new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) else: raise TypeError("Already tz-aware, use tz_convert to convert.") else: tz = timezones.maybe_get_tz(tz) # Convert to UTC new_dates = tzconversion.tz_localize_to_utc( self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent, creso=self._creso, ) new_dates = new_dates.view(f"M8[{self.unit}]") dtype = tz_to_dtype(tz, unit=self.unit) freq = None if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])): # we can preserve freq # TODO: Also for fixed-offsets freq = self.freq elif tz is None and self.tz is None: # no-op freq = self.freq return self._simple_new(new_dates, dtype=dtype, freq=freq) # ---------------------------------------------------------------- # Conversion Methods - Vectorized analogues of Timestamp methods def to_pydatetime(self) -> npt.NDArray[np.object_]: """ Return an ndarray of datetime.datetime objects. Returns ------- numpy.ndarray """ return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso) def normalize(self) -> DatetimeArray: """ Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso) dt64_values = new_values.view(self._ndarray.dtype) dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype) dta = dta._with_freq("infer") if self.tz is not None: dta = dta.tz_localize(self.tz) return dta def to_period(self, freq=None) -> PeriodArray: """ Cast to PeriodArray/Index at a particular frequency. Converts DatetimeArray/Index to PeriodArray/Index. 
Parameters ---------- freq : str or Offset, optional One of pandas' :ref:`offset strings <timeseries.offset_aliases>` or an Offset object. Will be inferred by default. Returns ------- PeriodArray/Index Raises ------ ValueError When converting a DatetimeArray/Index with non-regular values, so that a frequency cannot be inferred. See Also -------- PeriodIndex: Immutable ndarray holding ordinal values. DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. Examples -------- >>> df = pd.DataFrame({"y": [1, 2, 3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], dtype='period[M]') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], dtype='period[D]') """ from pandas.core.arrays import PeriodArray if self.tz is not None: warnings.warn( "Converting to PeriodArray/Index representation " "will drop timezone information.", UserWarning, stacklevel=find_stack_level(), ) if freq is None: freq = self.freqstr or self.inferred_freq if freq is None: raise ValueError( "You must pass a freq argument as current index has none." ) res = get_period_alias(freq) # https://github.com/pandas-dev/pandas/issues/33358 if res is None: res = freq freq = res return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz) # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods def month_name(self, locale=None) -> npt.NDArray[np.object_]: """ Return the month names with specified locale. Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale (``'en_US.utf8'``). Use the command ``locale -a`` on your terminal on Unix systems to find your locale language code. Returns ------- Series or Index Series or Index of month names. Examples -------- >>> s = pd.Series(pd.date_range(start='2018-01', freq='M', periods=3)) >>> s 0 2018-01-31 1 2018-02-28 2 2018-03-31 dtype: datetime64[ns] >>> s.dt.month_name() 0 January 1 February 2 March dtype: object >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name() Index(['January', 'February', 'March'], dtype='object') Using the ``locale`` parameter you can set a different locale language, for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month names in Brazilian Portuguese language. >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object') """ values = self._local_timestamps() result = fields.get_date_name_field( values, "month_name", locale=locale, reso=self._creso ) result = self._maybe_mask_results(result, fill_value=None) return result def day_name(self, locale=None) -> npt.NDArray[np.object_]: """ Return the day names with specified locale. Parameters ---------- locale : str, optional Locale determining the language in which to return the day name. Default is English locale (``'en_US.utf8'``). Use the command ``locale -a`` on your terminal on Unix systems to find your locale language code. Returns ------- Series or Index Series or Index of day names. 
Examples -------- >>> s = pd.Series(pd.date_range(start='2018-01-01', freq='D', periods=3)) >>> s 0 2018-01-01 1 2018-01-02 2 2018-01-03 dtype: datetime64[ns] >>> s.dt.day_name() 0 Monday 1 Tuesday 2 Wednesday dtype: object >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3) >>> idx DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], dtype='datetime64[ns]', freq='D') >>> idx.day_name() Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object') Using the ``locale`` parameter you can set a different locale language, for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day names in Brazilian Portuguese language. >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3) >>> idx DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], dtype='datetime64[ns]', freq='D') >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP Index(['Segunda', 'Terça', 'Quarta'], dtype='object') """ values = self._local_timestamps() result = fields.get_date_name_field( values, "day_name", locale=locale, reso=self._creso ) result = self._maybe_mask_results(result, fill_value=None) return result def time(self) -> npt.NDArray[np.object_]: """ Returns numpy array of :class:`datetime.time` objects. The time part of the Timestamps. """ # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box="time", reso=self._creso) def timetz(self) -> npt.NDArray[np.object_]: """ Returns numpy array of :class:`datetime.time` objects with timezones. The time part of the Timestamps. """ return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso) def date(self) -> npt.NDArray[np.object_]: """ Returns numpy array of python :class:`datetime.date` objects. Namely, the date part of Timestamps without time and timezone information. """ # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box="date", reso=self._creso) def isocalendar(self) -> DataFrame: """ Calculate year, week, and day according to the ISO 8601 standard. .. versionadded:: 1.1.0 Returns ------- DataFrame With columns year, week and day. See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32 """ from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values, reso=self._creso) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df year = _field_accessor( "year", "Y", """ The year of the datetime. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="Y") ... 
) >>> datetime_series 0 2000-12-31 1 2001-12-31 2 2002-12-31 dtype: datetime64[ns] >>> datetime_series.dt.year 0 2000 1 2001 2 2002 dtype: int32 """, ) month = _field_accessor( "month", "M", """ The month as January=1, December=12. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="M") ... ) >>> datetime_series 0 2000-01-31 1 2000-02-29 2 2000-03-31 dtype: datetime64[ns] >>> datetime_series.dt.month 0 1 1 2 2 3 dtype: int32 """, ) day = _field_accessor( "day", "D", """ The day of the datetime. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="D") ... ) >>> datetime_series 0 2000-01-01 1 2000-01-02 2 2000-01-03 dtype: datetime64[ns] >>> datetime_series.dt.day 0 1 1 2 2 3 dtype: int32 """, ) hour = _field_accessor( "hour", "h", """ The hours of the datetime. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="h") ... ) >>> datetime_series 0 2000-01-01 00:00:00 1 2000-01-01 01:00:00 2 2000-01-01 02:00:00 dtype: datetime64[ns] >>> datetime_series.dt.hour 0 0 1 1 2 2 dtype: int32 """, ) minute = _field_accessor( "minute", "m", """ The minutes of the datetime. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="T") ... ) >>> datetime_series 0 2000-01-01 00:00:00 1 2000-01-01 00:01:00 2 2000-01-01 00:02:00 dtype: datetime64[ns] >>> datetime_series.dt.minute 0 0 1 1 2 2 dtype: int32 """, ) second = _field_accessor( "second", "s", """ The seconds of the datetime. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="s") ... ) >>> datetime_series 0 2000-01-01 00:00:00 1 2000-01-01 00:00:01 2 2000-01-01 00:00:02 dtype: datetime64[ns] >>> datetime_series.dt.second 0 0 1 1 2 2 dtype: int32 """, ) microsecond = _field_accessor( "microsecond", "us", """ The microseconds of the datetime. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="us") ... ) >>> datetime_series 0 2000-01-01 00:00:00.000000 1 2000-01-01 00:00:00.000001 2 2000-01-01 00:00:00.000002 dtype: datetime64[ns] >>> datetime_series.dt.microsecond 0 0 1 1 2 2 dtype: int32 """, ) nanosecond = _field_accessor( "nanosecond", "ns", """ The nanoseconds of the datetime. Examples -------- >>> datetime_series = pd.Series( ... pd.date_range("2000-01-01", periods=3, freq="ns") ... ) >>> datetime_series 0 2000-01-01 00:00:00.000000000 1 2000-01-01 00:00:00.000000001 2 2000-01-01 00:00:00.000000002 dtype: datetime64[ns] >>> datetime_series.dt.nanosecond 0 0 1 1 2 2 dtype: int32 """, ) _dayofweek_doc = """ The day of the week with Monday=0, Sunday=6. Return the day of the week. It is assumed the week starts on Monday, which is denoted by 0 and ends on Sunday which is denoted by 6. This method is available on both Series with datetime values (using the `dt` accessor) or DatetimeIndex. Returns ------- Series or Index Containing integers indicating the day number. See Also -------- Series.dt.dayofweek : Alias. Series.dt.weekday : Alias. Series.dt.day_name : Returns the name of the day of the week. 
Examples -------- >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series() >>> s.dt.dayofweek 2016-12-31 5 2017-01-01 6 2017-01-02 0 2017-01-03 1 2017-01-04 2 2017-01-05 3 2017-01-06 4 2017-01-07 5 2017-01-08 6 Freq: D, dtype: int32 """ day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc) dayofweek = day_of_week weekday = day_of_week day_of_year = _field_accessor( "dayofyear", "doy", """ The ordinal day of the year. """, ) dayofyear = day_of_year quarter = _field_accessor( "quarter", "q", """ The quarter of the date. """, ) days_in_month = _field_accessor( "days_in_month", "dim", """ The number of days in the month. """, ) daysinmonth = days_in_month _is_month_doc = """ Indicates whether the date is the {first_or_last} day of the month. Returns ------- Series or array For Series, returns a Series with boolean values. For DatetimeIndex, returns a boolean array. See Also -------- is_month_start : Return a boolean indicating whether the date is the first day of the month. is_month_end : Return a boolean indicating whether the date is the last day of the month. Examples -------- This method is available on Series with datetime values under the ``.dt`` accessor, and directly on DatetimeIndex. >>> s = pd.Series(pd.date_range("2018-02-27", periods=3)) >>> s 0 2018-02-27 1 2018-02-28 2 2018-03-01 dtype: datetime64[ns] >>> s.dt.is_month_start 0 False 1 False 2 True dtype: bool >>> s.dt.is_month_end 0 False 1 True 2 False dtype: bool >>> idx = pd.date_range("2018-02-27", periods=3) >>> idx.is_month_start array([False, False, True]) >>> idx.is_month_end array([False, True, False]) """ is_month_start = _field_accessor( "is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first") ) is_month_end = _field_accessor( "is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last") ) is_quarter_start = _field_accessor( "is_quarter_start", "is_quarter_start", """ Indicator for whether the date is the first day of a quarter. Returns ------- is_quarter_start : Series or DatetimeIndex The same type as the original data with boolean values. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- quarter : Return the quarter of the date. is_quarter_end : Similar property for indicating the quarter end. Examples -------- This method is available on Series with datetime values under the ``.dt`` accessor, and directly on DatetimeIndex. >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30", ... periods=4)}) >>> df.assign(quarter=df.dates.dt.quarter, ... is_quarter_start=df.dates.dt.is_quarter_start) dates quarter is_quarter_start 0 2017-03-30 1 False 1 2017-03-31 1 False 2 2017-04-01 2 True 3 2017-04-02 2 False >>> idx = pd.date_range('2017-03-30', periods=4) >>> idx DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'], dtype='datetime64[ns]', freq='D') >>> idx.is_quarter_start array([False, False, True, False]) """, ) is_quarter_end = _field_accessor( "is_quarter_end", "is_quarter_end", """ Indicator for whether the date is the last day of a quarter. Returns ------- is_quarter_end : Series or DatetimeIndex The same type as the original data with boolean values. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- quarter : Return the quarter of the date. is_quarter_start : Similar property indicating the quarter start. 
Examples -------- This method is available on Series with datetime values under the ``.dt`` accessor, and directly on DatetimeIndex. >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30", ... periods=4)}) >>> df.assign(quarter=df.dates.dt.quarter, ... is_quarter_end=df.dates.dt.is_quarter_end) dates quarter is_quarter_end 0 2017-03-30 1 False 1 2017-03-31 1 True 2 2017-04-01 2 False 3 2017-04-02 2 False >>> idx = pd.date_range('2017-03-30', periods=4) >>> idx DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'], dtype='datetime64[ns]', freq='D') >>> idx.is_quarter_end array([False, True, False, False]) """, ) is_year_start = _field_accessor( "is_year_start", "is_year_start", """ Indicate whether the date is the first day of a year. Returns ------- Series or DatetimeIndex The same type as the original data with boolean values. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- is_year_end : Similar property indicating the last day of the year. Examples -------- This method is available on Series with datetime values under the ``.dt`` accessor, and directly on DatetimeIndex. >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3)) >>> dates 0 2017-12-30 1 2017-12-31 2 2018-01-01 dtype: datetime64[ns] >>> dates.dt.is_year_start 0 False 1 False 2 True dtype: bool >>> idx = pd.date_range("2017-12-30", periods=3) >>> idx DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'], dtype='datetime64[ns]', freq='D') >>> idx.is_year_start array([False, False, True]) """, ) is_year_end = _field_accessor( "is_year_end", "is_year_end", """ Indicate whether the date is the last day of the year. Returns ------- Series or DatetimeIndex The same type as the original data with boolean values. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- is_year_start : Similar property indicating the start of the year. Examples -------- This method is available on Series with datetime values under the ``.dt`` accessor, and directly on DatetimeIndex. >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3)) >>> dates 0 2017-12-30 1 2017-12-31 2 2018-01-01 dtype: datetime64[ns] >>> dates.dt.is_year_end 0 False 1 True 2 False dtype: bool >>> idx = pd.date_range("2017-12-30", periods=3) >>> idx DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'], dtype='datetime64[ns]', freq='D') >>> idx.is_year_end array([False, True, False]) """, ) is_leap_year = _field_accessor( "is_leap_year", "is_leap_year", """ Boolean indicator if the date belongs to a leap year. A leap year is a year, which has 366 days (instead of 365) including 29th of February as an intercalary day. Leap years are years which are multiples of four with the exception of years divisible by 100 but not by 400. Returns ------- Series or ndarray Booleans indicating if dates belong to a leap year. Examples -------- This method is available on Series with datetime values under the ``.dt`` accessor, and directly on DatetimeIndex. >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y") >>> idx DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'], dtype='datetime64[ns]', freq='A-DEC') >>> idx.is_leap_year array([ True, False, False]) >>> dates_series = pd.Series(idx) >>> dates_series 0 2012-12-31 1 2013-12-31 2 2014-12-31 dtype: datetime64[ns] >>> dates_series.dt.is_leap_year 0 True 1 False 2 False dtype: bool """, ) def to_julian_date(self) -> npt.NDArray[np.float64]: """ Convert Datetime Array to float64 ndarray of Julian Dates. 
0 Julian date is noon January 1, 4713 BC. https://en.wikipedia.org/wiki/Julian_day """ # http://mysite.verizon.net/aesir_research/date/jdalg2.htm year = np.asarray(self.year) month = np.asarray(self.month) day = np.asarray(self.day) testarr = month < 3 year[testarr] -= 1 month[testarr] += 12 return ( day + np.fix((153 * month - 457) / 5) + 365 * year + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400) + 1_721_118.5 + ( self.hour + self.minute / 60 + self.second / 3600 + self.microsecond / 3600 / 10**6 + self.nanosecond / 3600 / 10**9 ) / 24 ) # ----------------------------------------------------------------- # Reductions def std( self, axis=None, dtype=None, out=None, ddof: int = 1, keepdims: bool = False, skipna: bool = True, ): """ Return sample standard deviation over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument Parameters ---------- axis : int optional, default None Axis for the function to be applied on. For `Series` this parameter is unused and defaults to `None`. ddof : int, default 1 Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Timedelta """ # Because std is translation-invariant, we can get self.std # by calculating (self - Timestamp(0)).std, and we can do it # without creating a copy by using a view on self._ndarray from pandas.core.arrays import TimedeltaArray # Find the td64 dtype with the same resolution as our dt64 dtype dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64") dtype = np.dtype(dtype_str) tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype) return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna) def objects_to_datetime64ns( data: np.ndarray, dayfirst, yearfirst, utc: bool = False, errors: DateTimeErrorChoices = "raise", allow_object: bool = False, ): """ Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert/localize timestamps to UTC. errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes """ assert errors in ["raise", "ignore", "coerce"] # if str-dtype, convert data = np.array(data, copy=False, dtype=np.object_) result, tz_parsed = tslib.array_to_datetime( data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, ) if tz_parsed is not None: # We can take a shortcut since the datetime64 numpy array # is in UTC # Return i8 values to denote unix timestamps return result.view("i8"), tz_parsed elif is_datetime64_dtype(result): # returning M8[ns] denotes wall-times; since tz is None # the distinction is a thin one return result, tz_parsed elif is_object_dtype(result): # GH#23675 when called via `pd.to_datetime`, returning an object-dtype # array is allowed. 
When called via `pd.DatetimeIndex`, we can # only accept datetime64 dtype, so raise TypeError if object-dtype # is returned, as that indicates the values can be recognized as # datetimes but they have conflicting timezones/awareness if allow_object: return result, tz_parsed raise TypeError(result) else: # pragma: no cover # GH#23675 this TypeError should never be hit, whereas the TypeError # in the object-dtype branch above is reachable. raise TypeError(result) def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None): """ Convert data based on dtype conventions, issuing errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool tz : tzinfo or None, default None Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed """ if not hasattr(data, "dtype"): # e.g. collections.deque return data, copy if is_float_dtype(data.dtype): # pre-2.0 we treated these as wall-times, inconsistent with ints # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes. # Note: data.astype(np.int64) fails ARM tests, see # https://github.com/pandas-dev/pandas/issues/49468. data = data.astype(DT64NS_DTYPE).view("i8") copy = False elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype): # GH#29794 enforcing deprecation introduced in GH#23539 raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]") elif is_period_dtype(data.dtype): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError( "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead" ) elif is_extension_array_dtype(data.dtype) and not is_datetime64tz_dtype(data.dtype): # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False return data, copy def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None: """ If a timezone is inferred from data, check that it is compatible with the user-provided timezone, if any. Parameters ---------- tz : tzinfo or None inferred_tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if both timezones are present but do not match """ if tz is None: tz = inferred_tz elif inferred_tz is None: pass elif not timezones.tz_compare(tz, inferred_tz): raise TypeError( f"data is already tz-aware {inferred_tz}, unable to " f"set specified tz: {tz}" ) return tz class tzinfo: def tzname(self, dt: Optional[datetime]) -> Optional[str]: ... def utcoffset(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def dst(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def fromutc(self, dt: datetime) -> datetime: ... TimeAmbiguous = Union[Literal["infer", "NaT", "raise"], "npt.NDArray[np.bool_]"] DT64NS_DTYPE = conversion.DT64NS_DTYPE INT64_DTYPE = np.dtype(np.int64) def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. 
Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_sparse(arr) -> bool: """ Check whether an array-like is a 1-D pandas sparse array. Check that the one-dimensional array-like is a pandas sparse array. Returns True if it is a pandas sparse array, not another type of sparse array. Parameters ---------- arr : array-like Array-like to check. Returns ------- bool Whether or not the array-like is a pandas sparse array. Examples -------- Returns `True` if the parameter is a 1-D pandas sparse array. >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0])) True >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0]))) True Returns `False` if the parameter is not sparse. >>> is_sparse(np.array([0, 0, 1, 0])) False >>> is_sparse(pd.Series([0, 1, 0, 0])) False Returns `False` if the parameter is not a pandas sparse array. >>> from scipy.sparse import bsr_matrix >>> is_sparse(bsr_matrix([0, 1, 0, 0])) False Returns `False` if the parameter has more than one dimension. """ from pandas.core.arrays.sparse import SparseDtype dtype = getattr(arr, "dtype", arr) return isinstance(dtype, SparseDtype) def is_datetime64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the datetime64 dtype. Examples -------- >>> from pandas.api.types import is_datetime64_dtype >>> is_datetime64_dtype(object) False >>> is_datetime64_dtype(np.datetime64) True >>> is_datetime64_dtype(np.array([], dtype=int)) False >>> is_datetime64_dtype(np.array([], dtype=np.datetime64)) True >>> is_datetime64_dtype([1, 2, 3]) False """ if isinstance(arr_or_dtype, np.dtype): # GH#33400 fastpath for dtype object return arr_or_dtype.kind == "M" return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) def is_datetime64tz_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of a DatetimeTZDtype dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. Examples -------- >>> is_datetime64tz_dtype(object) False >>> is_datetime64tz_dtype([1, 2, 3]) False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_datetime64tz_dtype(dtype) True >>> is_datetime64tz_dtype(s) True """ if isinstance(arr_or_dtype, DatetimeTZDtype): # GH#33400 fastpath for dtype object # GH 34986 return True if arr_or_dtype is None: return False return DatetimeTZDtype.is_dtype(arr_or_dtype) def is_string_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of the string dtype. If an array is passed with an object dtype, the elements must be inferred as strings. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the string dtype. 
Examples -------- >>> is_string_dtype(str) True >>> is_string_dtype(object) True >>> is_string_dtype(int) False >>> is_string_dtype(np.array(['a', 'b'])) True >>> is_string_dtype(pd.Series([1, 2])) False >>> is_string_dtype(pd.Series([1, 2], dtype=object)) False """ if hasattr(arr_or_dtype, "dtype") and get_dtype(arr_or_dtype).kind == "O": return is_all_strings(arr_or_dtype) def condition(dtype) -> bool: if is_string_or_object_np_dtype(dtype): return True try: return dtype == "string" except TypeError: return False return _is_dtype(arr_or_dtype, condition) The provided code snippet includes necessary dependencies for implementing the `_sequence_to_dt64ns` function. Write a Python function `def _sequence_to_dt64ns( data, *, copy: bool = False, tz: tzinfo | None = None, dayfirst: bool = False, yearfirst: bool = False, ambiguous: TimeAmbiguous = "raise", out_unit: str | None = None, )` to solve the following problem: Parameters ---------- data : list-like copy : bool, default False tz : tzinfo or None, default None dayfirst : bool, default False yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' See pandas._libs.tslibs.tzconversion.tz_localize_to_utc. out_unit : str or None, default None Desired output resolution. Returns ------- result : numpy.ndarray The sequence converted to a numpy array with dtype ``datetime64[ns]``. tz : tzinfo or None Either the user-provided tzinfo or one inferred from the data. inferred_freq : Tick or None The inferred frequency of the sequence. Raises ------ TypeError : PeriodDType data is passed Here is the function: def _sequence_to_dt64ns( data, *, copy: bool = False, tz: tzinfo | None = None, dayfirst: bool = False, yearfirst: bool = False, ambiguous: TimeAmbiguous = "raise", out_unit: str | None = None, ): """ Parameters ---------- data : list-like copy : bool, default False tz : tzinfo or None, default None dayfirst : bool, default False yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' See pandas._libs.tslibs.tzconversion.tz_localize_to_utc. out_unit : str or None, default None Desired output resolution. Returns ------- result : numpy.ndarray The sequence converted to a numpy array with dtype ``datetime64[ns]``. tz : tzinfo or None Either the user-provided tzinfo or one inferred from the data. inferred_freq : Tick or None The inferred frequency of the sequence. Raises ------ TypeError : PeriodDType data is passed """ inferred_freq = None data, copy = dtl.ensure_arraylike_for_datetimelike( data, copy, cls_name="DatetimeArray" ) if isinstance(data, DatetimeArray): inferred_freq = data.freq # By this point we are assured to have either a numpy array or Index data, copy = maybe_convert_dtype(data, copy, tz=tz) data_dtype = getattr(data, "dtype", None) out_dtype = DT64NS_DTYPE if out_unit is not None: out_dtype = np.dtype(f"M8[{out_unit}]") if ( is_object_dtype(data_dtype) or is_string_dtype(data_dtype) or is_sparse(data_dtype) ): # TODO: We do not have tests specific to string-dtypes, # also complex or categorical or other extension copy = False if lib.infer_dtype(data, skipna=False) == "integer": data = data.astype(np.int64) elif tz is not None and ambiguous == "raise": # TODO: yearfirst/dayfirst/etc? 
obj_data = np.asarray(data, dtype=object) i8data = tslib.array_to_datetime_with_tz(obj_data, tz) return i8data.view(DT64NS_DTYPE), tz, None else: # data comes back here as either i8 to denote UTC timestamps # or M8[ns] to denote wall times data, inferred_tz = objects_to_datetime64ns( data, dayfirst=dayfirst, yearfirst=yearfirst, allow_object=False, ) if tz and inferred_tz: # two timezones: convert to intended from base UTC repr assert data.dtype == "i8" # GH#42505 # by convention, these are _already_ UTC, e.g return data.view(DT64NS_DTYPE), tz, None elif inferred_tz: tz = inferred_tz data_dtype = data.dtype # `data` may have originally been a Categorical[datetime64[ns, tz]], # so we need to handle these types. if is_datetime64tz_dtype(data_dtype): # DatetimeArray -> ndarray tz = _maybe_infer_tz(tz, data.tz) result = data._ndarray elif is_datetime64_dtype(data_dtype): # tz-naive DatetimeArray or ndarray[datetime64] data = getattr(data, "_ndarray", data) new_dtype = data.dtype data_unit = get_unit_from_dtype(new_dtype) if not is_supported_unit(data_unit): # Cast to the nearest supported unit, generally "s" new_reso = get_supported_reso(data_unit) new_unit = npy_unit_to_abbrev(new_reso) new_dtype = np.dtype(f"M8[{new_unit}]") data = astype_overflowsafe(data, dtype=new_dtype, copy=False) data_unit = get_unit_from_dtype(new_dtype) copy = False if data.dtype.byteorder == ">": # TODO: better way to handle this? non-copying alternative? # without this, test_constructor_datetime64_bigendian fails data = data.astype(data.dtype.newbyteorder("<")) new_dtype = data.dtype copy = False if tz is not None: # Convert tz-naive to UTC # TODO: if tz is UTC, are there situations where we *don't* want a # copy? tz_localize_to_utc always makes one. shape = data.shape if data.ndim > 1: data = data.ravel() data = tzconversion.tz_localize_to_utc( data.view("i8"), tz, ambiguous=ambiguous, creso=data_unit ) data = data.view(new_dtype) data = data.reshape(shape) assert data.dtype == new_dtype, data.dtype result = data else: # must be integer dtype otherwise # assume this data are epoch timestamps if data.dtype != INT64_DTYPE: data = data.astype(np.int64, copy=False) result = data.view(out_dtype) if copy: result = result.copy() assert isinstance(result, np.ndarray), type(result) assert result.dtype.kind == "M" assert result.dtype != "M8" assert is_supported_unit(get_unit_from_dtype(result.dtype)) return result, tz, inferred_freq
Parameters ---------- data : list-like copy : bool, default False tz : tzinfo or None, default None dayfirst : bool, default False yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' See pandas._libs.tslibs.tzconversion.tz_localize_to_utc. out_unit : str or None, default None Desired output resolution. Returns ------- result : numpy.ndarray The sequence converted to a numpy array with dtype ``datetime64[ns]``. tz : tzinfo or None Either the user-provided tzinfo or one inferred from the data. inferred_freq : Tick or None The inferred frequency of the sequence. Raises ------ TypeError : PeriodDType data is passed
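A minimal usage sketch for the helper above. It assumes the private import path pandas.core.arrays.datetimes and a pandas build where the signature matches the one shown; as internal API this can change between releases.

import pandas as pd
from pandas.core.arrays.datetimes import _sequence_to_dt64ns

# Plain strings are parsed as wall times; no timezone is inferred here.
result, tz, freq = _sequence_to_dt64ns(["2021-01-01", "2021-01-02"])
print(result.dtype, tz, freq)  # datetime64[ns] None None

# A tz-aware DatetimeIndex keeps its timezone, which comes back as `tz`.
aware = pd.DatetimeIndex(["2021-01-01", "2021-01-02"], tz="US/Eastern")
result, tz, freq = _sequence_to_dt64ns(aware)
print(tz)  # US/Eastern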
173,306
from __future__ import annotations from datetime import ( datetime, time, timedelta, tzinfo, ) from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_reso, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_unit, is_unitless, normalize_i8_timestamps, npy_unit_to_abbrev, timezones, to_offset, tz_convert_from_utc, tzconversion, ) from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas._typing import ( DateTimeErrorChoices, IntervalClosedType, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_object_dtype, is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( Day, Tick, ) def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... def is_dtype_equal(source, target) -> bool: """ Check if two dtypes are equal. Parameters ---------- source : The first dtype to compare target : The second dtype to compare Returns ------- boolean Whether or not the two dtypes are equal. Examples -------- >>> is_dtype_equal(int, float) False >>> is_dtype_equal("int", int) True >>> is_dtype_equal(object, "category") False >>> is_dtype_equal(CategoricalDtype(), "category") True >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") False """ if isinstance(target, str): if not isinstance(source, str): # GH#38516 ensure we get the same behavior from # is_dtype_equal(CDT, "category") and CDT == "category" try: src = get_dtype(source) if isinstance(src, ExtensionDtype): return src == target except (TypeError, AttributeError, ImportError): return False elif isinstance(source, str): return is_dtype_equal(target, source) try: source = get_dtype(source) target = get_dtype(target) return source == target except (TypeError, AttributeError, ImportError): # invalid comparison # object == category will hit this return False def pandas_dtype(dtype) -> DtypeObj: """ Convert input into a pandas only dtype object or a numpy dtype object. 
Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """ # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show # numpy deprication warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == "O": raise TypeError(f"dtype '{dtype}' not understood") return npdtype class DatetimeTZDtype(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- unit : str, default "ns" The precision of the datetime data. Currently limited to ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ pytz.UnknownTimeZoneError When the requested timezone cannot be found. Examples -------- >>> pd.DatetimeTZDtype(tz='UTC') datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz='dateutil/US/Central') datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def na_value(self) -> NaTType: return NaT # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." 
) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) def unit(self) -> str_type: """ The precision of the datetime data. """ return self._unit def tz(self) -> tzinfo: """ The timezone. """ return self._tz def construct_array_type(cls) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. Examples -------- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d["unit"], tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # pytz timezone (actually pytz.UnknownTimeZoneError). # TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __setstate__(self, state) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] The provided code snippet includes necessary dependencies for implementing the `_validate_dt64_dtype` function. Write a Python function `def _validate_dt64_dtype(dtype)` to solve the following problem: Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike _validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through Here is the function: def _validate_dt64_dtype(dtype): """ Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. 
Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike _validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, np.dtype("M8")): # no precision, disallowed GH#24806 msg = ( "Passing in 'datetime64' dtype with no precision is not allowed. " "Please pass in 'datetime64[ns]' instead." ) raise ValueError(msg) if ( isinstance(dtype, np.dtype) and (dtype.kind != "M" or not is_supported_unit(get_unit_from_dtype(dtype))) ) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)): raise ValueError( f"Unexpected value for 'dtype': '{dtype}'. " "Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', " "'datetime64[ns]' or DatetimeTZDtype'." ) if getattr(dtype, "tz", None): # https://github.com/pandas-dev/pandas/issues/18595 # Ensure that we have a standard timezone for pytz objects. # Without this, things like adding an array of timedeltas and # a tz-aware Timestamp (with a tz specific to its datetime) will # be incorrect(ish?) for the array as a whole dtype = cast(DatetimeTZDtype, dtype) dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz)) return dtype
Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike _validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through
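A short sketch of the accept/reject behaviour described above (same assumed private import path; the helper is not public API):

import numpy as np
from pandas.core.arrays.datetimes import _validate_dt64_dtype

print(_validate_dt64_dtype(None))                   # None passes through
print(_validate_dt64_dtype("datetime64[ns]"))       # -> numpy datetime64[ns] dtype
print(_validate_dt64_dtype("datetime64[ns, UTC]"))  # -> DatetimeTZDtype with a standardized tz

try:
    _validate_dt64_dtype(np.dtype("M8"))            # unit-less datetime64 is rejected
except ValueError as exc:
    print(exc)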
173,307
from __future__ import annotations from datetime import ( datetime, time, timedelta, tzinfo, ) from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_reso, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_unit, is_unitless, normalize_i8_timestamps, npy_unit_to_abbrev, timezones, to_offset, tz_convert_from_utc, tzconversion, ) from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas._typing import ( DateTimeErrorChoices, IntervalClosedType, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_object_dtype, is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( Day, Tick, ) class tzinfo: def tzname(self, dt: Optional[datetime]) -> Optional[str]: ... def utcoffset(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def dst(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def fromutc(self, dt: datetime) -> datetime: ... def is_datetime64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the datetime64 dtype. Examples -------- >>> from pandas.api.types import is_datetime64_dtype >>> is_datetime64_dtype(object) False >>> is_datetime64_dtype(np.datetime64) True >>> is_datetime64_dtype(np.array([], dtype=int)) False >>> is_datetime64_dtype(np.array([], dtype=np.datetime64)) True >>> is_datetime64_dtype([1, 2, 3]) False """ if isinstance(arr_or_dtype, np.dtype): # GH#33400 fastpath for dtype object return arr_or_dtype.kind == "M" return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) class DatetimeTZDtype(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- unit : str, default "ns" The precision of the datetime data. Currently limited to ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ pytz.UnknownTimeZoneError When the requested timezone cannot be found. Examples -------- >>> pd.DatetimeTZDtype(tz='UTC') datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz='dateutil/US/Central') datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? 
_metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def na_value(self) -> NaTType: return NaT # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." ) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) def unit(self) -> str_type: """ The precision of the datetime data. """ return self._unit def tz(self) -> tzinfo: """ The timezone. """ return self._tz def construct_array_type(cls) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. Examples -------- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d["unit"], tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # pytz timezone (actually pytz.UnknownTimeZoneError). # TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __setstate__(self, state) -> None: # for pickle compat. 
__get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] The provided code snippet includes necessary dependencies for implementing the `_validate_tz_from_dtype` function. Write a Python function `def _validate_tz_from_dtype( dtype, tz: tzinfo | None, explicit_tz_none: bool = False ) -> tzinfo | None` to solve the following problem: If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given tz. Parameters ---------- dtype : dtype, str tz : None, tzinfo explicit_tz_none : bool, default False Whether tz=None was passed explicitly, as opposed to lib.no_default. Returns ------- tz : consensus tzinfo Raises ------ ValueError : on tzinfo mismatch Here is the function: def _validate_tz_from_dtype( dtype, tz: tzinfo | None, explicit_tz_none: bool = False ) -> tzinfo | None: """ If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given tz. Parameters ---------- dtype : dtype, str tz : None, tzinfo explicit_tz_none : bool, default False Whether tz=None was passed explicitly, as opposed to lib.no_default. Returns ------- tz : consensus tzinfo Raises ------ ValueError : on tzinfo mismatch """ if dtype is not None: if isinstance(dtype, str): try: dtype = DatetimeTZDtype.construct_from_string(dtype) except TypeError: # Things like `datetime64[ns]`, which is OK for the # constructors, but also nonsense, which should be validated # but not by us. We *do* allow non-existent tz errors to # go through pass dtz = getattr(dtype, "tz", None) if dtz is not None: if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a dtype with a tz") if explicit_tz_none: raise ValueError("Cannot pass both a timezone-aware dtype and tz=None") tz = dtz if tz is not None and is_datetime64_dtype(dtype): # We also need to check for the case where the user passed a # tz-naive dtype (i.e. datetime64[ns]) if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError( "cannot supply both a tz and a " "timezone-naive dtype (i.e. datetime64[ns])" ) return tz
If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given tz. Parameters ---------- dtype : dtype, str tz : None, tzinfo explicit_tz_none : bool, default False Whether tz=None was passed explicitly, as opposed to lib.no_default. Returns ------- tz : consensus tzinfo Raises ------ ValueError : on tzinfo mismatch
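A hedged sketch of the consensus-timezone check (assuming the same private import path and that pytz is available, as it is for pandas of this vintage):

import pytz
from pandas.core.arrays.datetimes import _validate_tz_from_dtype

# tz is inferred from a tz-aware dtype when none is given explicitly.
print(_validate_tz_from_dtype("datetime64[ns, UTC]", tz=None))  # UTC

# A conflicting tz and dtype tz is rejected.
try:
    _validate_tz_from_dtype("datetime64[ns, UTC]", tz=pytz.timezone("US/Eastern"))
except ValueError as exc:
    print(exc)  # cannot supply both a tz and a dtype with a tz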
173,308
from __future__ import annotations from datetime import ( datetime, time, timedelta, tzinfo, ) from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_reso, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_unit, is_unitless, normalize_i8_timestamps, npy_unit_to_abbrev, timezones, to_offset, tz_convert_from_utc, tzconversion, ) from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas._typing import ( DateTimeErrorChoices, IntervalClosedType, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_object_dtype, is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( Day, Tick, ) class tzinfo: def tzname(self, dt: Optional[datetime]) -> Optional[str]: ... def utcoffset(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def dst(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def fromutc(self, dt: datetime) -> datetime: ... The provided code snippet includes necessary dependencies for implementing the `_infer_tz_from_endpoints` function. Write a Python function `def _infer_tz_from_endpoints( start: Timestamp, end: Timestamp, tz: tzinfo | None ) -> tzinfo | None` to solve the following problem: If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one of these inputs provides a timezone, require that they all agree. Parameters ---------- start : Timestamp end : Timestamp tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if start and end timezones do not agree Here is the function: def _infer_tz_from_endpoints( start: Timestamp, end: Timestamp, tz: tzinfo | None ) -> tzinfo | None: """ If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one of these inputs provides a timezone, require that they all agree. Parameters ---------- start : Timestamp end : Timestamp tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if start and end timezones do not agree """ try: inferred_tz = timezones.infer_tzinfo(start, end) except AssertionError as err: # infer_tzinfo raises AssertionError if passed mismatched timezones raise TypeError( "Start and end cannot both be tz-aware with different timezones" ) from err inferred_tz = timezones.maybe_get_tz(inferred_tz) tz = timezones.maybe_get_tz(tz) if tz is not None and inferred_tz is not None: if not timezones.tz_compare(inferred_tz, tz): raise AssertionError("Inferred time zone not equal to passed time zone") elif inferred_tz is not None: tz = inferred_tz return tz
If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one of these inputs provides a timezone, require that they all agree. Parameters ---------- start : Timestamp end : Timestamp tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if start and end timezones do not agree
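A small sketch of the endpoint-timezone inference (private import path assumed):

import pandas as pd
from pandas.core.arrays.datetimes import _infer_tz_from_endpoints

start = pd.Timestamp("2021-01-01", tz="UTC")
end = pd.Timestamp("2021-01-02", tz="UTC")
print(_infer_tz_from_endpoints(start, end, tz=None))  # UTC, inferred from the endpoints

mixed_end = pd.Timestamp("2021-01-02", tz="US/Eastern")
try:
    _infer_tz_from_endpoints(start, mixed_end, tz=None)
except TypeError as exc:
    print(exc)  # endpoints are tz-aware with different timezones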
173,309
from __future__ import annotations from datetime import ( datetime, time, timedelta, tzinfo, ) from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_reso, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_unit, is_unitless, normalize_i8_timestamps, npy_unit_to_abbrev, timezones, to_offset, tz_convert_from_utc, tzconversion, ) from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas._typing import ( DateTimeErrorChoices, IntervalClosedType, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_object_dtype, is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( Day, Tick, ) def _maybe_normalize_endpoints( start: Timestamp | None, end: Timestamp | None, normalize: bool ): if normalize: if start is not None: start = start.normalize() if end is not None: end = end.normalize() return start, end
null
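A tiny usage sketch for this helper (private import path assumed; with normalize=True both endpoints snap to midnight):

import pandas as pd
from pandas.core.arrays.datetimes import _maybe_normalize_endpoints

start = pd.Timestamp("2021-01-01 09:30")
end = pd.Timestamp("2021-01-05 17:00")
print(_maybe_normalize_endpoints(start, end, normalize=True))
# (Timestamp('2021-01-01 00:00:00'), Timestamp('2021-01-05 00:00:00'))
print(_maybe_normalize_endpoints(start, None, normalize=False))
# (Timestamp('2021-01-01 09:30:00'), None)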
173,310
from __future__ import annotations from datetime import ( datetime, time, timedelta, tzinfo, ) from typing import ( TYPE_CHECKING, Iterator, cast, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_reso, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_unit, is_unitless, normalize_i8_timestamps, npy_unit_to_abbrev, timezones, to_offset, tz_convert_from_utc, tzconversion, ) from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas._typing import ( DateTimeErrorChoices, IntervalClosedType, TimeAmbiguous, TimeNonexistent, npt, ) from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_object_dtype, is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( Day, Tick, ) The provided code snippet includes necessary dependencies for implementing the `_maybe_localize_point` function. Write a Python function `def _maybe_localize_point(ts, is_none, is_not_none, freq, tz, ambiguous, nonexistent)` to solve the following problem: Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None ambiguous: str, localization behavior for ambiguous times nonexistent: str, localization behavior for nonexistent times Returns ------- ts : Timestamp Here is the function: def _maybe_localize_point(ts, is_none, is_not_none, freq, tz, ambiguous, nonexistent): """ Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None ambiguous: str, localization behavior for ambiguous times nonexistent: str, localization behavior for nonexistent times Returns ------- ts : Timestamp """ # Make sure start and end are timezone localized if: # 1) freq = a Timedelta-like frequency (Tick) # 2) freq = None i.e. generating a linspaced range if is_none is None and is_not_none is not None: # Note: We can't ambiguous='infer' a singular ambiguous time; however, # we have historically defaulted ambiguous=False ambiguous = ambiguous if ambiguous != "infer" else False localize_args = {"ambiguous": ambiguous, "nonexistent": nonexistent, "tz": None} if isinstance(freq, Tick) or freq is None: localize_args["tz"] = tz ts = ts.tz_localize(**localize_args) return ts
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp.

Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
ambiguous : str, localization behavior for ambiguous times
nonexistent : str, localization behavior for nonexistent times

Returns
-------
ts : Timestamp
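A minimal sketch of the localization that _maybe_localize_point applies when the frequency is a Tick or None, shown here with the public Timestamp.tz_localize API; the timezone and timestamp are illustrative choices, not values from the row above.

import pandas as pd

ts = pd.Timestamp("2023-03-26 12:00")

# A single endpoint cannot be disambiguated with ambiguous='infer', so the
# helper falls back to ambiguous=False before localizing.
localized = ts.tz_localize("Europe/Berlin", ambiguous=False, nonexistent="raise")
print(localized)  # 2023-03-26 12:00:00+02:00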
173,311
from __future__ import annotations from collections import abc import numbers import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib import pandas._libs.sparse as splib from pandas._libs.sparse import ( BlockIndex, IntIndex, SparseIndex, ) from pandas._libs.tslibs import NaT from pandas._typing import ( ArrayLike, AstypeArg, Axis, AxisInt, Dtype, NpDtype, PositionalIndexer, Scalar, ScalarIndexer, SequenceIndexer, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_bool_kwarg, validate_insert_loc, ) from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, find_common_type, maybe_box_datetimelike, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_dtype_equal, is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype, pandas_dtype, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core import ( arraylike, ops, ) import pandas.core.algorithms as algos from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray from pandas.core.arrays.sparse.dtype import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import ( check_array_indexer, unpack_tuple_and_ellipses, ) from pandas.core.missing import interpolate_2d from pandas.core.nanops import check_below_min_count from pandas.io.formats import printing def _get_fill(arr: SparseArray) -> np.ndarray: """ Create a 0-dim ndarray containing the fill value Parameters ---------- arr : SparseArray Returns ------- fill_value : ndarray 0-dim ndarray with just the fill value. Notes ----- coerce fill_value to arr dtype if possible int64 SparseArray can have NaN as fill_value if there is no missing """ try: return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) except ValueError: return np.asarray(arr.fill_value) def _wrap_result( name: str, data, sparse_index, fill_value, dtype: Dtype | None = None ) -> SparseArray: """ wrap op result to have correct dtype """ if name.startswith("__"): # e.g. __eq__ --> eq name = name[2:-2] if name in ("eq", "ne", "lt", "gt", "le", "ge"): dtype = bool fill_value = lib.item_from_zerodim(fill_value) if is_bool_dtype(dtype): # fill_value may be np.bool_ fill_value = bool(fill_value) return SparseArray( data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype ) class SparseArray(OpsMixin, PandasObject, ExtensionArray): """ An ExtensionArray for storing sparse data. Parameters ---------- data : array-like or scalar A dense array of values to store in the SparseArray. This may contain `fill_value`. sparse_index : SparseIndex, optional fill_value : scalar, optional Elements in data that are ``fill_value`` are not stored in the SparseArray. For memory savings, this should be the most common value in `data`. 
By default, `fill_value` depends on the dtype of `data`: =========== ========== data.dtype na_value =========== ========== float ``np.nan`` int ``0`` bool False datetime64 ``pd.NaT`` timedelta64 ``pd.NaT`` =========== ========== The fill value is potentially specified in three ways. In order of precedence, these are 1. The `fill_value` argument 2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is a ``SparseDtype`` 3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype` is not a ``SparseDtype`` and `data` is a ``SparseArray``. kind : str Can be 'integer' or 'block', default is 'integer'. The type of storage for sparse locations. * 'block': Stores a `block` and `block_length` for each contiguous *span* of sparse values. This is best when sparse data tends to be clumped together, with large regions of ``fill-value`` values between sparse values. * 'integer': uses an integer to store the location of each sparse value. dtype : np.dtype or SparseDtype, optional The dtype to use for the SparseArray. For numpy dtypes, this determines the dtype of ``self.sp_values``. For SparseDtype, this determines ``self.sp_values`` and ``self.fill_value``. copy : bool, default False Whether to explicitly copy the incoming `data` array. Attributes ---------- None Methods ------- None Examples -------- >>> from pandas.arrays import SparseArray >>> arr = SparseArray([0, 0, 1, 2]) >>> arr [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) """ _subtyp = "sparse_array" # register ABCSparseArray _hidden_attrs = PandasObject._hidden_attrs | frozenset([]) _sparse_index: SparseIndex _sparse_values: np.ndarray _dtype: SparseDtype def __init__( self, data, sparse_index=None, fill_value=None, kind: SparseIndexKind = "integer", dtype: Dtype | None = None, copy: bool = False, ) -> None: if fill_value is None and isinstance(dtype, SparseDtype): fill_value = dtype.fill_value if isinstance(data, type(self)): # disable normal inference on dtype, sparse_index, & fill_value if sparse_index is None: sparse_index = data.sp_index if fill_value is None: fill_value = data.fill_value if dtype is None: dtype = data.dtype # TODO: make kind=None, and use data.kind? data = data.sp_values # Handle use-provided dtype if isinstance(dtype, str): # Two options: dtype='int', regular numpy dtype # or dtype='Sparse[int]', a sparse dtype try: dtype = SparseDtype.construct_from_string(dtype) except TypeError: dtype = pandas_dtype(dtype) if isinstance(dtype, SparseDtype): if fill_value is None: fill_value = dtype.fill_value dtype = dtype.subtype if is_scalar(data): if sparse_index is None: npoints = 1 else: npoints = sparse_index.length data = construct_1d_arraylike_from_scalar(data, npoints, dtype=None) dtype = data.dtype if dtype is not None: dtype = pandas_dtype(dtype) # TODO: disentangle the fill_value dtype inference from # dtype inference if data is None: # TODO: What should the empty dtype be? Object or float? # error: Argument "dtype" to "array" has incompatible type # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any], # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" data = np.array([], dtype=dtype) # type: ignore[arg-type] if not is_array_like(data): try: # probably shared code in sanitize_series data = sanitize_array(data, index=None) except ValueError: # NumPy may raise a ValueError on data like [1, []] # we retry with object dtype here. 
if dtype is None: dtype = np.dtype(object) data = np.atleast_1d(np.asarray(data, dtype=dtype)) else: raise if copy: # TODO: avoid double copy when dtype forces cast. data = data.copy() if fill_value is None: fill_value_dtype = data.dtype if dtype is None else dtype if fill_value_dtype is None: fill_value = np.nan else: fill_value = na_value_for_dtype(fill_value_dtype) if isinstance(data, type(self)) and sparse_index is None: sparse_index = data._sparse_index # error: Argument "dtype" to "asarray" has incompatible type # "Union[ExtensionDtype, dtype[Any], None]"; expected "None" sparse_values = np.asarray( data.sp_values, dtype=dtype # type: ignore[arg-type] ) elif sparse_index is None: data = extract_array(data, extract_numpy=True) if not isinstance(data, np.ndarray): # EA if is_datetime64tz_dtype(data.dtype): warnings.warn( f"Creating SparseArray from {data.dtype} data " "loses timezone information. Cast to object before " "sparse to retain timezone information.", UserWarning, stacklevel=find_stack_level(), ) data = np.asarray(data, dtype="datetime64[ns]") if fill_value is NaT: fill_value = np.datetime64("NaT", "ns") data = np.asarray(data) sparse_values, sparse_index, fill_value = _make_sparse( # error: Argument "dtype" to "_make_sparse" has incompatible type # "Union[ExtensionDtype, dtype[Any], None]"; expected # "Optional[dtype[Any]]" data, kind=kind, fill_value=fill_value, dtype=dtype, # type: ignore[arg-type] ) else: # error: Argument "dtype" to "asarray" has incompatible type # "Union[ExtensionDtype, dtype[Any], None]"; expected "None" sparse_values = np.asarray(data, dtype=dtype) # type: ignore[arg-type] if len(sparse_values) != sparse_index.npoints: raise AssertionError( f"Non array-like type {type(sparse_values)} must " "have the same length as the index" ) self._sparse_index = sparse_index self._sparse_values = sparse_values self._dtype = SparseDtype(sparse_values.dtype, fill_value) def _simple_new( cls: type[SparseArrayT], sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype, ) -> SparseArrayT: new = object.__new__(cls) new._sparse_index = sparse_index new._sparse_values = sparse_array new._dtype = dtype return new def from_spmatrix(cls: type[SparseArrayT], data: spmatrix) -> SparseArrayT: """ Create a SparseArray from a scipy.sparse matrix. Parameters ---------- data : scipy.sparse.sp_matrix This should be a SciPy sparse matrix where the size of the second dimension is 1. In other words, a sparse matrix with a single column. Returns ------- SparseArray Examples -------- >>> import scipy.sparse >>> mat = scipy.sparse.coo_matrix((4, 1)) >>> pd.arrays.SparseArray.from_spmatrix(mat) [0.0, 0.0, 0.0, 0.0] Fill: 0.0 IntIndex Indices: array([], dtype=int32) """ length, ncol = data.shape if ncol != 1: raise ValueError(f"'data' must have a single column, not '{ncol}'") # our sparse index classes require that the positions be strictly # increasing. So we need to sort loc, and arr accordingly. data = data.tocsc() data.sort_indices() arr = data.data idx = data.indices zero = np.array(0, dtype=arr.dtype).item() dtype = SparseDtype(arr.dtype, zero) index = IntIndex(length, idx) return cls._simple_new(arr, index, dtype) def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: fill_value = self.fill_value if self.sp_index.ngaps == 0: # Compat for na dtype and int values. return self.sp_values if dtype is None: # Can NumPy represent this type? # If not, `np.result_type` will raise. We catch that # and return object. 
if is_datetime64_any_dtype(self.sp_values.dtype): # However, we *do* special-case the common case of # a datetime64 with pandas NaT. if fill_value is NaT: # Can't put pd.NaT in a datetime64[ns] fill_value = np.datetime64("NaT") try: dtype = np.result_type(self.sp_values.dtype, type(fill_value)) except TypeError: dtype = object out = np.full(self.shape, fill_value, dtype=dtype) out[self.sp_index.indices] = self.sp_values return out def __setitem__(self, key, value): # I suppose we could allow setting of non-fill_value elements. # TODO(SparseArray.__setitem__): remove special cases in # ExtensionBlock.where msg = "SparseArray does not support item assignment via setitem" raise TypeError(msg) def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): return cls(scalars, dtype=dtype) def _from_factorized(cls, values, original): return cls(values, dtype=original.dtype) # ------------------------------------------------------------------------ # Data # ------------------------------------------------------------------------ def sp_index(self) -> SparseIndex: """ The SparseIndex containing the location of non- ``fill_value`` points. """ return self._sparse_index def sp_values(self) -> np.ndarray: """ An ndarray containing the non- ``fill_value`` values. Examples -------- >>> from pandas.arrays import SparseArray >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0) >>> s.sp_values array([1, 2]) """ return self._sparse_values def dtype(self) -> SparseDtype: return self._dtype def fill_value(self): """ Elements in `data` that are `fill_value` are not stored. For memory savings, this should be the most common value in the array. """ return self.dtype.fill_value def fill_value(self, value) -> None: self._dtype = SparseDtype(self.dtype.subtype, value) def kind(self) -> SparseIndexKind: """ The kind of sparse index for this array. One of {'integer', 'block'}. """ if isinstance(self.sp_index, IntIndex): return "integer" else: return "block" def _valid_sp_values(self) -> np.ndarray: sp_vals = self.sp_values mask = notna(sp_vals) return sp_vals[mask] def __len__(self) -> int: return self.sp_index.length def _null_fill_value(self) -> bool: return self._dtype._is_na_fill_value def _fill_value_matches(self, fill_value) -> bool: if self._null_fill_value: return isna(fill_value) else: return self.fill_value == fill_value def nbytes(self) -> int: return self.sp_values.nbytes + self.sp_index.nbytes def density(self) -> float: """ The percent of non- ``fill_value`` points, as decimal. Examples -------- >>> from pandas.arrays import SparseArray >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6 """ return self.sp_index.npoints / self.sp_index.length def npoints(self) -> int: """ The number of non- ``fill_value`` points. Examples -------- >>> from pandas.arrays import SparseArray >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.npoints 3 """ return self.sp_index.npoints def isna(self): # If null fill value, we want SparseDtype[bool, true] # to preserve the same memory usage. dtype = SparseDtype(bool, self._null_fill_value) if self._null_fill_value: return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype) mask = np.full(len(self), False, dtype=np.bool_) mask[self.sp_index.indices] = isna(self.sp_values) return type(self)(mask, fill_value=False, dtype=dtype) def fillna( self: SparseArrayT, value=None, method: FillnaOptions | None = None, limit: int | None = None, ) -> SparseArrayT: """ Fill missing values with `value`. 
Parameters ---------- value : scalar, optional method : str, optional .. warning:: Using 'method' will result in high memory use, as all `fill_value` methods will be converted to an in-memory ndarray limit : int, optional Returns ------- SparseArray Notes ----- When `value` is specified, the result's ``fill_value`` depends on ``self.fill_value``. The goal is to maintain low-memory use. If ``self.fill_value`` is NA, the result dtype will be ``SparseDtype(self.dtype, fill_value=value)``. This will preserve amount of memory used before and after filling. When ``self.fill_value`` is not NA, the result dtype will be ``self.dtype``. Again, this preserves the amount of memory used. """ if (method is None and value is None) or ( method is not None and value is not None ): raise ValueError("Must specify one of 'method' or 'value'.") if method is not None: msg = "fillna with 'method' requires high memory usage." warnings.warn( msg, PerformanceWarning, stacklevel=find_stack_level(), ) new_values = np.asarray(self) # interpolate_2d modifies new_values inplace interpolate_2d(new_values, method=method, limit=limit) return type(self)(new_values, fill_value=self.fill_value) else: new_values = np.where(isna(self.sp_values), value, self.sp_values) if self._null_fill_value: # This is essentially just updating the dtype. new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) else: new_dtype = self.dtype return self._simple_new(new_values, self._sparse_index, new_dtype) def shift(self: SparseArrayT, periods: int = 1, fill_value=None) -> SparseArrayT: if not len(self) or periods == 0: return self.copy() if isna(fill_value): fill_value = self.dtype.na_value subtype = np.result_type(fill_value, self.dtype.subtype) if subtype != self.dtype.subtype: # just coerce up front arr = self.astype(SparseDtype(subtype, self.fill_value)) else: arr = self empty = self._from_sequence( [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype ) if periods > 0: a = empty b = arr[:-periods] else: a = arr[abs(periods) :] b = empty return arr._concat_same_type([a, b]) def _first_fill_value_loc(self): """ Get the location of the first fill value. Returns ------- int """ if len(self) == 0 or self.sp_index.npoints == len(self): return -1 indices = self.sp_index.indices if not len(indices) or indices[0] > 0: return 0 # a number larger than 1 should be appended to # the last in case of fill value only appears # in the tail of array diff = np.r_[np.diff(indices), 2] return indices[(diff > 1).argmax()] + 1 def unique(self: SparseArrayT) -> SparseArrayT: uniques = algos.unique(self.sp_values) if len(self.sp_values) != len(self): fill_loc = self._first_fill_value_loc() # Inorder to align the behavior of pd.unique or # pd.Series.unique, we should keep the original # order, here we use unique again to find the # insertion place. Since the length of sp_values # is not large, maybe minor performance hurt # is worthwhile to the correctness. insert_loc = len(algos.unique(self.sp_values[:fill_loc])) uniques = np.insert(uniques, insert_loc, self.fill_value) return type(self)._from_sequence(uniques, dtype=self.dtype) def _values_for_factorize(self): # Still override this for hash_pandas_object return np.asarray(self), self.fill_value def factorize( self, use_na_sentinel: bool = True, ) -> tuple[np.ndarray, SparseArray]: # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA] # The sparsity on this is backwards from what Sparse would want. 
Want # ExtensionArray.factorize -> Tuple[EA, EA] # Given that we have to return a dense array of codes, why bother # implementing an efficient factorize? codes, uniques = algos.factorize( np.asarray(self), use_na_sentinel=use_na_sentinel ) uniques_sp = SparseArray(uniques, dtype=self.dtype) return codes, uniques_sp def value_counts(self, dropna: bool = True) -> Series: """ Returns a Series containing counts of unique values. Parameters ---------- dropna : bool, default True Don't include counts of NaN, even if NaN is in sp_values. Returns ------- counts : Series """ from pandas import ( Index, Series, ) keys, counts = algos.value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0 and (not self._null_fill_value or not dropna): mask = isna(keys) if self._null_fill_value else keys == self.fill_value if mask.any(): counts[mask] += fcounts else: # error: Argument 1 to "insert" has incompatible type "Union[ # ExtensionArray,ndarray[Any, Any]]"; expected "Union[ # _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype # [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]], # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence # [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]" keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type] counts = np.insert(counts, 0, fcounts) if not isinstance(keys, ABCIndex): index = Index(keys) else: index = keys return Series(counts, index=index, copy=False) # -------- # Indexing # -------- def __getitem__(self, key: ScalarIndexer) -> Any: ... def __getitem__( self: SparseArrayT, key: SequenceIndexer | tuple[int | ellipsis, ...], ) -> SparseArrayT: ... def __getitem__( self: SparseArrayT, key: PositionalIndexer | tuple[int | ellipsis, ...], ) -> SparseArrayT | Any: if isinstance(key, tuple): key = unpack_tuple_and_ellipses(key) if key is Ellipsis: raise ValueError("Cannot slice with Ellipsis") if is_integer(key): return self._get_val_at(key) elif isinstance(key, tuple): # error: Invalid index type "Tuple[Union[int, ellipsis], ...]" # for "ndarray[Any, Any]"; expected type # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_, # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[ # Union[bool_, integer[Any]]]]], _NestedSequence[Union[ # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[ # dtype[Union[bool_, integer[Any]]]], _NestedSequence[ # _SupportsArray[dtype[Union[bool_, integer[Any]]]]], # _NestedSequence[Union[bool, int]]], ...]]" data_slice = self.to_dense()[key] # type: ignore[index] elif isinstance(key, slice): # Avoid densifying when handling contiguous slices if key.step is None or key.step == 1: start = 0 if key.start is None else key.start if start < 0: start += len(self) end = len(self) if key.stop is None else key.stop if end < 0: end += len(self) indices = self.sp_index.indices keep_inds = np.flatnonzero((indices >= start) & (indices < end)) sp_vals = self.sp_values[keep_inds] sp_index = indices[keep_inds].copy() # If we've sliced to not include the start of the array, all our indices # should be shifted. 
NB: here we are careful to also not shift by a # negative value for a case like [0, 1][-100:] where the start index # should be treated like 0 if start > 0: sp_index -= start # Length of our result should match applying this slice to a range # of the length of our original array new_len = len(range(len(self))[key]) new_sp_index = make_sparse_index(new_len, sp_index, self.kind) return type(self)._simple_new(sp_vals, new_sp_index, self.dtype) else: indices = np.arange(len(self), dtype=np.int32)[key] return self.take(indices) elif not is_list_like(key): # e.g. "foo" or 2.5 # exception message copied from numpy raise IndexError( r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " r"(`None`) and integer or boolean arrays are valid indices" ) else: if isinstance(key, SparseArray): # NOTE: If we guarantee that SparseDType(bool) # has only fill_value - true, false or nan # (see GH PR 44955) # we can apply mask very fast: if is_bool_dtype(key): if isna(key.fill_value): return self.take(key.sp_index.indices[key.sp_values]) if not key.fill_value: return self.take(key.sp_index.indices) n = len(self) mask = np.full(n, True, dtype=np.bool_) mask[key.sp_index.indices] = False return self.take(np.arange(n)[mask]) else: key = np.asarray(key) key = check_array_indexer(self, key) if com.is_bool_indexer(key): # mypy doesn't know we have an array here key = cast(np.ndarray, key) return self.take(np.arange(len(key), dtype=np.int32)[key]) elif hasattr(key, "__len__"): return self.take(key) else: raise ValueError(f"Cannot slice with '{key}'") return type(self)(data_slice, kind=self.kind) def _get_val_at(self, loc): loc = validate_insert_loc(loc, len(self)) sp_loc = self.sp_index.lookup(loc) if sp_loc == -1: return self.fill_value else: val = self.sp_values[sp_loc] val = maybe_box_datetimelike(val, self.sp_values.dtype) return val def take( self: SparseArrayT, indices, *, allow_fill: bool = False, fill_value=None ) -> SparseArrayT: if is_scalar(indices): raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.") indices = np.asarray(indices, dtype=np.int32) dtype = None if indices.size == 0: result = np.array([], dtype="object") dtype = self.dtype elif allow_fill: result = self._take_with_fill(indices, fill_value=fill_value) else: return self._take_without_fill(indices) return type(self)( result, fill_value=self.fill_value, kind=self.kind, dtype=dtype ) def _take_with_fill(self, indices, fill_value=None) -> np.ndarray: if fill_value is None: fill_value = self.dtype.na_value if indices.min() < -1: raise ValueError( "Invalid value in 'indices'. Must be between -1 " "and the length of the array." ) if indices.max() >= len(self): raise IndexError("out of bounds value in 'indices'.") if len(self) == 0: # Empty... Allow taking only if all empty if (indices == -1).all(): dtype = np.result_type(self.sp_values, type(fill_value)) taken = np.empty_like(indices, dtype=dtype) taken.fill(fill_value) return taken else: raise IndexError("cannot do a non-empty take from an empty axes.") # sp_indexer may be -1 for two reasons # 1.) we took for an index of -1 (new) # 2.) we took a value that was self.fill_value (old) sp_indexer = self.sp_index.lookup_array(indices) new_fill_indices = indices == -1 old_fill_indices = (sp_indexer == -1) & ~new_fill_indices if self.sp_index.npoints == 0 and old_fill_indices.all(): # We've looked up all valid points on an all-sparse array. 
taken = np.full( sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype ) elif self.sp_index.npoints == 0: # Avoid taking from the empty self.sp_values _dtype = np.result_type(self.dtype.subtype, type(fill_value)) taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype) else: taken = self.sp_values.take(sp_indexer) # Fill in two steps. # Old fill values # New fill values # potentially coercing to a new dtype at each stage. m0 = sp_indexer[old_fill_indices] < 0 m1 = sp_indexer[new_fill_indices] < 0 result_type = taken.dtype if m0.any(): result_type = np.result_type(result_type, type(self.fill_value)) taken = taken.astype(result_type) taken[old_fill_indices] = self.fill_value if m1.any(): result_type = np.result_type(result_type, type(fill_value)) taken = taken.astype(result_type) taken[new_fill_indices] = fill_value return taken def _take_without_fill(self: SparseArrayT, indices) -> SparseArrayT: to_shift = indices < 0 n = len(self) if (indices.max() >= n) or (indices.min() < -n): if n == 0: raise IndexError("cannot do a non-empty take from an empty axes.") raise IndexError("out of bounds value in 'indices'.") if to_shift.any(): indices = indices.copy() indices[to_shift] += n sp_indexer = self.sp_index.lookup_array(indices) value_mask = sp_indexer != -1 new_sp_values = self.sp_values[sp_indexer[value_mask]] value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False) new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind) return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype) def searchsorted( self, v: ArrayLike | object, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: msg = "searchsorted requires high memory usage." warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) if not is_scalar(v): v = np.asarray(v) v = np.asarray(v) return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter) def copy(self: SparseArrayT) -> SparseArrayT: values = self.sp_values.copy() return self._simple_new(values, self.sp_index, self.dtype) def _concat_same_type( cls: type[SparseArrayT], to_concat: Sequence[SparseArrayT] ) -> SparseArrayT: fill_value = to_concat[0].fill_value values = [] length = 0 if to_concat: sp_kind = to_concat[0].kind else: sp_kind = "integer" sp_index: SparseIndex if sp_kind == "integer": indices = [] for arr in to_concat: int_idx = arr.sp_index.indices.copy() int_idx += length # TODO: wraparound length += arr.sp_index.length values.append(arr.sp_values) indices.append(int_idx) data = np.concatenate(values) indices_arr = np.concatenate(indices) # error: Argument 2 to "IntIndex" has incompatible type # "ndarray[Any, dtype[signedinteger[_32Bit]]]"; # expected "Sequence[int]" sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type] else: # when concatenating block indices, we don't claim that you'll # get an identical index as concatenating the values and then # creating a new index. We don't want to spend the time trying # to merge blocks across arrays in `to_concat`, so the resulting # BlockIndex may have more blocks. 
blengths = [] blocs = [] for arr in to_concat: block_idx = arr.sp_index.to_block_index() values.append(arr.sp_values) blocs.append(block_idx.blocs.copy() + length) blengths.append(block_idx.blengths) length += arr.sp_index.length data = np.concatenate(values) blocs_arr = np.concatenate(blocs) blengths_arr = np.concatenate(blengths) sp_index = BlockIndex(length, blocs_arr, blengths_arr) return cls(data, sparse_index=sp_index, fill_value=fill_value) def astype(self, dtype: AstypeArg | None = None, copy: bool = True): """ Change the dtype of a SparseArray. The output will always be a SparseArray. To convert to a dense ndarray with a certain dtype, use :meth:`numpy.asarray`. Parameters ---------- dtype : np.dtype or ExtensionDtype For SparseDtype, this changes the dtype of ``self.sp_values`` and the ``self.fill_value``. For other dtypes, this only changes the dtype of ``self.sp_values``. copy : bool, default True Whether to ensure a copy is made, even if not necessary. Returns ------- SparseArray Examples -------- >>> arr = pd.arrays.SparseArray([0, 0, 1, 2]) >>> arr [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) >>> arr.astype(SparseDtype(np.dtype('int32'))) [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Using a NumPy dtype with a different kind (e.g. float) will coerce just ``self.sp_values``. >>> arr.astype(SparseDtype(np.dtype('float64'))) ... # doctest: +NORMALIZE_WHITESPACE [nan, nan, 1.0, 2.0] Fill: nan IntIndex Indices: array([2, 3], dtype=int32) Using a SparseDtype, you can also change the fill value as well. >>> arr.astype(SparseDtype("float64", fill_value=0.0)) ... # doctest: +NORMALIZE_WHITESPACE [0.0, 0.0, 1.0, 2.0] Fill: 0.0 IntIndex Indices: array([2, 3], dtype=int32) """ if is_dtype_equal(dtype, self._dtype): if not copy: return self else: return self.copy() future_dtype = pandas_dtype(dtype) if not isinstance(future_dtype, SparseDtype): # GH#34457 values = np.asarray(self) values = ensure_wrapped_if_datetimelike(values) return astype_array(values, dtype=future_dtype, copy=False) dtype = self.dtype.update_dtype(dtype) subtype = pandas_dtype(dtype._subtype_with_str) subtype = cast(np.dtype, subtype) # ensured by update_dtype values = ensure_wrapped_if_datetimelike(self.sp_values) sp_values = astype_array(values, subtype, copy=copy) sp_values = np.asarray(sp_values) return self._simple_new(sp_values, self.sp_index, dtype) def map(self: SparseArrayT, mapper) -> SparseArrayT: """ Map categories using an input mapping or function. Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. Returns ------- SparseArray The output array will have the same density as the input. The output fill value will be the result of applying the mapping to ``self.fill_value`` Examples -------- >>> arr = pd.arrays.SparseArray([0, 1, 2]) >>> arr.map(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) """ # this is used in apply. # We get hit since we're an "is_extension_array_dtype" but regular extension # types are not hit. This may be worth adding to the interface. 
if isinstance(mapper, ABCSeries): mapper = mapper.to_dict() if isinstance(mapper, abc.Mapping): fill_value = mapper.get(self.fill_value, self.fill_value) sp_values = [mapper.get(x, None) for x in self.sp_values] else: fill_value = mapper(self.fill_value) sp_values = [mapper(x) for x in self.sp_values] return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_value) def to_dense(self) -> np.ndarray: """ Convert SparseArray to a NumPy array. Returns ------- arr : NumPy array """ return np.asarray(self, dtype=self.sp_values.dtype) def _where(self, mask, value): # NB: may not preserve dtype, e.g. result may be Sparse[float64] # while self is Sparse[int64] naive_implementation = np.where(mask, self, value) dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value) result = type(self)._from_sequence(naive_implementation, dtype=dtype) return result # ------------------------------------------------------------------------ # IO # ------------------------------------------------------------------------ def __setstate__(self, state) -> None: """Necessary for making this object picklable""" if isinstance(state, tuple): # Compat for pandas < 0.24.0 nd_state, (fill_value, sp_index) = state sparse_values = np.array([]) sparse_values.__setstate__(nd_state) self._sparse_values = sparse_values self._sparse_index = sp_index self._dtype = SparseDtype(sparse_values.dtype, fill_value) else: self.__dict__.update(state) def nonzero(self) -> tuple[npt.NDArray[np.int32]]: if self.fill_value == 0: return (self.sp_index.indices,) else: return (self.sp_index.indices[self.sp_values != 0],) # ------------------------------------------------------------------------ # Reductions # ------------------------------------------------------------------------ def _reduce(self, name: str, *, skipna: bool = True, **kwargs): method = getattr(self, name, None) if method is None: raise TypeError(f"cannot perform {name} with type {self.dtype}") if skipna: arr = self else: arr = self.dropna() return getattr(arr, name)(**kwargs) def all(self, axis=None, *args, **kwargs): """ Tests whether all elements evaluate True Returns ------- all : bool See Also -------- numpy.all """ nv.validate_all(args, kwargs) values = self.sp_values if len(values) != len(self) and not np.all(self.fill_value): return False return values.all() def any(self, axis: AxisInt = 0, *args, **kwargs): """ Tests whether at least one of elements evaluate True Returns ------- any : bool See Also -------- numpy.any """ nv.validate_any(args, kwargs) values = self.sp_values if len(values) != len(self) and np.any(self.fill_value): return True return values.any().item() def sum( self, axis: AxisInt = 0, min_count: int = 0, skipna: bool = True, *args, **kwargs, ) -> Scalar: """ Sum of non-NA/null values Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. min_count : int, default 0 The required number of valid values to perform the summation. If fewer than ``min_count`` valid values are present, the result will be the missing value indicator for subarray type. *args, **kwargs Not Used. NumPy compatibility. 
Returns ------- scalar """ nv.validate_sum(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() has_na = self.sp_index.ngaps > 0 and not self._null_fill_value if has_na and not skipna: return na_value_for_dtype(self.dtype.subtype, compat=False) if self._null_fill_value: if check_below_min_count(valid_vals.shape, None, min_count): return na_value_for_dtype(self.dtype.subtype, compat=False) return sp_sum else: nsparse = self.sp_index.ngaps if check_below_min_count(valid_vals.shape, None, min_count - nsparse): return na_value_for_dtype(self.dtype.subtype, compat=False) return sp_sum + self.fill_value * nsparse def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray: """ Cumulative sum of non-NA/null values. When performing the cumulative summation, any non-NA/null values will be skipped. The resulting SparseArray will preserve the locations of NaN values, but the fill value will be `np.nan` regardless. Parameters ---------- axis : int or None Axis over which to perform the cumulative summation. If None, perform cumulative summation over flattened array. Returns ------- cumsum : SparseArray """ nv.validate_cumsum(args, kwargs) if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour. raise ValueError(f"axis(={axis}) out of bounds") if not self._null_fill_value: return SparseArray(self.to_dense()).cumsum() return SparseArray( self.sp_values.cumsum(), sparse_index=self.sp_index, fill_value=self.fill_value, ) def mean(self, axis: Axis = 0, *args, **kwargs): """ Mean of non-NA/null values Returns ------- mean : float """ nv.validate_mean(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() ct = len(valid_vals) if self._null_fill_value: return sp_sum / ct else: nsparse = self.sp_index.ngaps return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) def max(self, *, axis: AxisInt | None = None, skipna: bool = True): """ Max of array values, ignoring NA values if specified. Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. skipna : bool, default True Whether to ignore NA values. Returns ------- scalar """ nv.validate_minmax_axis(axis, self.ndim) return self._min_max("max", skipna=skipna) def min(self, *, axis: AxisInt | None = None, skipna: bool = True): """ Min of array values, ignoring NA values if specified. Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. skipna : bool, default True Whether to ignore NA values. 
Returns ------- scalar """ nv.validate_minmax_axis(axis, self.ndim) return self._min_max("min", skipna=skipna) def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar: """ Min/max of non-NA/null values Parameters ---------- kind : {"min", "max"} skipna : bool Returns ------- scalar """ valid_vals = self._valid_sp_values has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0 if len(valid_vals) > 0: sp_min_max = getattr(valid_vals, kind)() # If a non-null fill value is currently present, it might be the min/max if has_nonnull_fill_vals: func = max if kind == "max" else min return func(sp_min_max, self.fill_value) elif skipna: return sp_min_max elif self.sp_index.ngaps == 0: # No NAs present return sp_min_max else: return na_value_for_dtype(self.dtype.subtype, compat=False) elif has_nonnull_fill_vals: return self.fill_value else: return na_value_for_dtype(self.dtype.subtype, compat=False) def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int: values = self._sparse_values index = self._sparse_index.indices mask = np.asarray(isna(values)) func = np.argmax if kind == "argmax" else np.argmin idx = np.arange(values.shape[0]) non_nans = values[~mask] non_nan_idx = idx[~mask] _candidate = non_nan_idx[func(non_nans)] candidate = index[_candidate] if isna(self.fill_value): return candidate if kind == "argmin" and self[candidate] < self.fill_value: return candidate if kind == "argmax" and self[candidate] > self.fill_value: return candidate _loc = self._first_fill_value_loc() if _loc == -1: # fill_value doesn't exist return candidate else: return _loc def argmax(self, skipna: bool = True) -> int: validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: raise NotImplementedError return self._argmin_argmax("argmax") def argmin(self, skipna: bool = True) -> int: validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: raise NotImplementedError return self._argmin_argmax("argmin") # ------------------------------------------------------------------------ # Ufuncs # ------------------------------------------------------------------------ _HANDLED_TYPES = (np.ndarray, numbers.Number) def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): out = kwargs.get("out", ()) for x in inputs + out: if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)): return NotImplemented # for binary ops, use our custom dunder methods result = ops.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace res = arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) return res if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: # e.g. tests.series.test_ufunc.TestNumpyReductions return result if len(inputs) == 1: # No alignment necessary. sp_values = getattr(ufunc, method)(self.sp_values, **kwargs) fill_value = getattr(ufunc, method)(self.fill_value, **kwargs) if ufunc.nout > 1: # multiple outputs. e.g. modf arrays = tuple( self._simple_new( sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv) ) for sp_value, fv in zip(sp_values, fill_value) ) return arrays elif method == "reduce": # e.g. 
reductions return sp_values return self._simple_new( sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value) ) new_inputs = tuple(np.asarray(x) for x in inputs) result = getattr(ufunc, method)(*new_inputs, **kwargs) if out: if len(out) == 1: out = out[0] return out if ufunc.nout > 1: return tuple(type(self)(x) for x in result) elif method == "at": # no return value return None else: return type(self)(result) # ------------------------------------------------------------------------ # Ops # ------------------------------------------------------------------------ def _arith_method(self, other, op): op_name = op.__name__ if isinstance(other, SparseArray): return _sparse_array_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all="ignore"): fill = op(_get_fill(self), np.asarray(other)) result = op(self.sp_values, other) if op_name == "divmod": left, right = result lfill, rfill = fill return ( _wrap_result(op_name, left, self.sp_index, lfill), _wrap_result(op_name, right, self.sp_index, rfill), ) return _wrap_result(op_name, result, self.sp_index, fill) else: other = np.asarray(other) with np.errstate(all="ignore"): if len(self) != len(other): raise AssertionError( f"length mismatch: {len(self)} vs. {len(other)}" ) if not isinstance(other, SparseArray): dtype = getattr(other, "dtype", None) other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) return _sparse_array_op(self, other, op, op_name) def _cmp_method(self, other, op) -> SparseArray: if not is_scalar(other) and not isinstance(other, type(self)): # convert list-like to ndarray other = np.asarray(other) if isinstance(other, np.ndarray): # TODO: make this more flexible than just ndarray... other = SparseArray(other, fill_value=self.fill_value) if isinstance(other, SparseArray): if len(self) != len(other): raise ValueError( f"operands have mismatched length {len(self)} and {len(other)}" ) op_name = op.__name__.strip("_") return _sparse_array_op(self, other, op, op_name) else: # scalar with np.errstate(all="ignore"): fill_value = op(self.fill_value, other) result = np.full(len(self), fill_value, dtype=np.bool_) result[self.sp_index.indices] = op(self.sp_values, other) return type(self)( result, fill_value=fill_value, dtype=np.bool_, ) _logical_method = _cmp_method def _unary_method(self, op) -> SparseArray: fill_value = op(np.array(self.fill_value)).item() dtype = SparseDtype(self.dtype.subtype, fill_value) # NOTE: if fill_value doesn't change # we just have to apply op to sp_values if isna(self.fill_value) or fill_value == self.fill_value: values = op(self.sp_values) return type(self)._simple_new(values, self.sp_index, self.dtype) # In the other case we have to recalc indexes return type(self)(op(self.to_dense()), dtype=dtype) def __pos__(self) -> SparseArray: return self._unary_method(operator.pos) def __neg__(self) -> SparseArray: return self._unary_method(operator.neg) def __invert__(self) -> SparseArray: return self._unary_method(operator.invert) def __abs__(self) -> SparseArray: return self._unary_method(operator.abs) # ---------- # Formatting # ----------- def __repr__(self) -> str: pp_str = printing.pprint_thing(self) pp_fill = printing.pprint_thing(self.fill_value) pp_index = printing.pprint_thing(self.sp_index) return f"{pp_str}\nFill: {pp_fill}\n{pp_index}" def _formatter(self, boxed: bool = False): # Defer to the formatter from the GenericArrayFormatter calling us. # This will infer the correct formatter from the dtype of the values. 
return None class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) def find_common_type(types: list[np.dtype]) -> np.dtype: ... def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: ... def find_common_type(types: list[DtypeObj]) -> DtypeObj: ... def find_common_type(types): """ Find a common data type among the given dtypes. Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type """ if not types: raise ValueError("no types given") first = types[0] # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) # => object if lib.dtypes_all_equal(list(types)): return first # get unique types (dict.fromkeys is used as order-preserving set()) types = list(dict.fromkeys(types).keys()) if any(isinstance(t, ExtensionDtype) for t in types): for t in types: if isinstance(t, ExtensionDtype): res = t._get_common_dtype(types) if res is not None: return res return np.dtype("object") # take lowest unit if all(is_datetime64_dtype(t) for t in types): return np.dtype("datetime64[ns]") if all(is_timedelta64_dtype(t) for t in types): return np.dtype("timedelta64[ns]") # don't mix bool / int or float or complex # this is different from numpy, which casts bool with float/int as int has_bools = any(is_bool_dtype(t) for t in types) if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): return np.dtype("object") return np.find_common_type(types, []) def is_dtype_equal(source, target) -> bool: """ Check if two dtypes are equal. Parameters ---------- source : The first dtype to compare target : The second dtype to compare Returns ------- boolean Whether or not the two dtypes are equal. Examples -------- >>> is_dtype_equal(int, float) False >>> is_dtype_equal("int", int) True >>> is_dtype_equal(object, "category") False >>> is_dtype_equal(CategoricalDtype(), "category") True >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") False """ if isinstance(target, str): if not isinstance(source, str): # GH#38516 ensure we get the same behavior from # is_dtype_equal(CDT, "category") and CDT == "category" try: src = get_dtype(source) if isinstance(src, ExtensionDtype): return src == target except (TypeError, AttributeError, ImportError): return False elif isinstance(source, str): return is_dtype_equal(target, source) try: source = get_dtype(source) target = get_dtype(target) return source == target except (TypeError, AttributeError, ImportError): # invalid comparison # object == category will hit this return False class SparseDtype(ExtensionDtype): """ Dtype for data stored in :class:`SparseArray`. This dtype implements the pandas ExtensionDtype interface. Parameters ---------- dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 The dtype of the underlying array storing the non-fill value values. fill_value : scalar, optional The scalar value not stored in the SparseArray. By default, this depends on `dtype`. 
=========== ========== dtype na_value =========== ========== float ``np.nan`` int ``0`` bool ``False`` datetime64 ``pd.NaT`` timedelta64 ``pd.NaT`` =========== ========== The default value may be overridden by specifying a `fill_value`. Attributes ---------- None Methods ------- None """ # We include `_is_na_fill_value` in the metadata to avoid hash collisions # between SparseDtype(float, 0.0) and SparseDtype(float, nan). # Without is_na_fill_value in the comparison, those would be equal since # hash(nan) is (sometimes?) 0. _metadata = ("_dtype", "_fill_value", "_is_na_fill_value") def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: if isinstance(dtype, type(self)): if fill_value is None: fill_value = dtype.fill_value dtype = dtype.subtype dtype = pandas_dtype(dtype) if is_string_dtype(dtype): dtype = np.dtype("object") if fill_value is None: fill_value = na_value_for_dtype(dtype) self._dtype = dtype self._fill_value = fill_value self._check_fill_value() def __hash__(self) -> int: # Python3 doesn't inherit __hash__ when a base class overrides # __eq__, so we explicitly do it here. return super().__hash__() def __eq__(self, other: Any) -> bool: # We have to override __eq__ to handle NA values in _metadata. # The base class does simple == checks, which fail for NA. if isinstance(other, str): try: other = self.construct_from_string(other) except TypeError: return False if isinstance(other, type(self)): subtype = self.subtype == other.subtype if self._is_na_fill_value: # this case is complicated by two things: # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan) # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT) # i.e. we want to treat any floating-point NaN as equal, but # not a floating-point NaN and a datetime NaT. fill_value = ( other._is_na_fill_value and isinstance(self.fill_value, type(other.fill_value)) or isinstance(other.fill_value, type(self.fill_value)) ) else: with warnings.catch_warnings(): # Ignore spurious numpy warning warnings.filterwarnings( "ignore", "elementwise comparison failed", category=DeprecationWarning, ) fill_value = self.fill_value == other.fill_value return subtype and fill_value return False def fill_value(self): """ The fill value of the array. Converting the SparseArray to a dense ndarray will fill the array with this value. .. warning:: It's possible to end up with a SparseArray that has ``fill_value`` values in ``sp_values``. This can occur, for example, when setting ``SparseArray.fill_value`` directly. """ return self._fill_value def _check_fill_value(self): if not is_scalar(self._fill_value): raise ValueError( f"fill_value must be a scalar. Got {self._fill_value} instead" ) # TODO: Right now we can use Sparse boolean array # with any fill_value. Here was an attempt # to allow only 3 value: True, False or nan # but plenty test has failed. # see pull 44955 # if self._is_boolean and not ( # is_bool(self._fill_value) or isna(self._fill_value) # ): # raise ValueError( # "fill_value must be True, False or nan " # f"for boolean type. Got {self._fill_value} instead" # ) def _is_na_fill_value(self) -> bool: return isna(self.fill_value) def _is_numeric(self) -> bool: return not is_object_dtype(self.subtype) def _is_boolean(self) -> bool: return is_bool_dtype(self.subtype) def kind(self) -> str: """ The sparse kind. Either 'integer', or 'block'. 
""" return self.subtype.kind def type(self): return self.subtype.type def subtype(self): return self._dtype def name(self) -> str: return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]" def __repr__(self) -> str: return self.name def construct_array_type(cls) -> type_t[SparseArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays.sparse.array import SparseArray return SparseArray def construct_from_string(cls, string: str) -> SparseDtype: """ Construct a SparseDtype from a string form. Parameters ---------- string : str Can take the following forms. string dtype ================ ============================ 'int' SparseDtype[np.int64, 0] 'Sparse' SparseDtype[np.float64, nan] 'Sparse[int]' SparseDtype[np.int64, 0] 'Sparse[int, 0]' SparseDtype[np.int64, 0] ================ ============================ It is not possible to specify non-default fill values with a string. An argument like ``'Sparse[int, 1]'`` will raise a ``TypeError`` because the default fill value for integers is 0. Returns ------- SparseDtype """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'SparseDtype' from '{string}'" if string.startswith("Sparse"): try: sub_type, has_fill_value = cls._parse_subtype(string) except ValueError as err: raise TypeError(msg) from err else: result = SparseDtype(sub_type) msg = ( f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt " "looks like the fill_value in the string is not " "the default for the dtype. Non-default fill_values " "are not supported. Use the 'SparseDtype()' " "constructor instead." ) if has_fill_value and str(result) != string: raise TypeError(msg) return result else: raise TypeError(msg) def _parse_subtype(dtype: str) -> tuple[str, bool]: """ Parse a string to get the subtype Parameters ---------- dtype : str A string like * Sparse[subtype] * Sparse[subtype, fill_value] Returns ------- subtype : str Raises ------ ValueError When the subtype cannot be extracted. """ xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$") m = xpr.match(dtype) has_fill_value = False if m: subtype = m.groupdict()["subtype"] has_fill_value = bool(m.groupdict()["fill_value"]) elif dtype == "Sparse": subtype = "float64" else: raise ValueError(f"Cannot parse {dtype}") return subtype, has_fill_value def is_dtype(cls, dtype: object) -> bool: dtype = getattr(dtype, "dtype", dtype) if isinstance(dtype, str) and dtype.startswith("Sparse"): sub_type, _ = cls._parse_subtype(dtype) dtype = np.dtype(sub_type) elif isinstance(dtype, cls): return True return isinstance(dtype, np.dtype) or dtype == "Sparse" def update_dtype(self, dtype) -> SparseDtype: """ Convert the SparseDtype to a new dtype. This takes care of converting the ``fill_value``. Parameters ---------- dtype : Union[str, numpy.dtype, SparseDtype] The new dtype to use. * For a SparseDtype, it is simply returned * For a NumPy dtype (or str), the current fill value is converted to the new dtype, and a SparseDtype with `dtype` and the new fill value is returned. Returns ------- SparseDtype A new SparseDtype with the correct `dtype` and fill value for that `dtype`. Raises ------ ValueError When the current fill value cannot be converted to the new `dtype` (e.g. trying to convert ``np.nan`` to an integer dtype). 
Examples -------- >>> SparseDtype(int, 0).update_dtype(float) Sparse[float64, 0.0] >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) Sparse[float64, nan] """ cls = type(self) dtype = pandas_dtype(dtype) if not isinstance(dtype, cls): if not isinstance(dtype, np.dtype): raise TypeError("sparse arrays of extension dtypes not supported") fv_asarray = np.atleast_1d(np.array(self.fill_value)) fvarr = astype_array(fv_asarray, dtype) # NB: not fv_0d.item(), as that casts dt64->int fill_value = fvarr[0] dtype = cls(dtype, fill_value=fill_value) return dtype def _subtype_with_str(self): """ Whether the SparseDtype's subtype should be considered ``str``. Typically, pandas will store string data in an object-dtype array. When converting values to a dtype, e.g. in ``.astype``, we need to be more specific, we need the actual underlying type. Returns ------- >>> SparseDtype(int, 1)._subtype_with_str dtype('int64') >>> SparseDtype(object, 1)._subtype_with_str dtype('O') >>> dtype = SparseDtype(str, '') >>> dtype.subtype dtype('O') >>> dtype._subtype_with_str <class 'str'> """ if isinstance(self.fill_value, str): return type(self.fill_value) return self.subtype def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # TODO for now only handle SparseDtypes and numpy dtypes => extend # with other compatible extension dtypes if any( isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype) for x in dtypes ): return None fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)] fill_value = fill_values[0] # np.nan isn't a singleton, so we may end up with multiple # NaNs here, so we ignore the all NA case too. if not (len(set(fill_values)) == 1 or isna(fill_values).all()): warnings.warn( "Concatenating sparse arrays with multiple fill " f"values: '{fill_values}'. Picking the first and " "converting the rest.", PerformanceWarning, stacklevel=find_stack_level(), ) np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value) The provided code snippet includes necessary dependencies for implementing the `_sparse_array_op` function. Write a Python function `def _sparse_array_op( left: SparseArray, right: SparseArray, op: Callable, name: str ) -> SparseArray` to solve the following problem: Perform a binary operation between two arrays. Parameters ---------- left : Union[SparseArray, ndarray] right : Union[SparseArray, ndarray] op : Callable The binary operation to perform name str Name of the callable. Returns ------- SparseArray Here is the function: def _sparse_array_op( left: SparseArray, right: SparseArray, op: Callable, name: str ) -> SparseArray: """ Perform a binary operation between two arrays. Parameters ---------- left : Union[SparseArray, ndarray] right : Union[SparseArray, ndarray] op : Callable The binary operation to perform name str Name of the callable. 
Returns ------- SparseArray """ if name.startswith("__"): # For lookups in _libs.sparse we need non-dunder op name name = name[2:-2] # dtype used to find corresponding sparse method ltype = left.dtype.subtype rtype = right.dtype.subtype if not is_dtype_equal(ltype, rtype): subtype = find_common_type([ltype, rtype]) ltype = SparseDtype(subtype, left.fill_value) rtype = SparseDtype(subtype, right.fill_value) left = left.astype(ltype, copy=False) right = right.astype(rtype, copy=False) dtype = ltype.subtype else: dtype = ltype # dtype the result must have result_dtype = None if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0: with np.errstate(all="ignore"): result = op(left.to_dense(), right.to_dense()) fill = op(_get_fill(left), _get_fill(right)) if left.sp_index.ngaps == 0: index = left.sp_index else: index = right.sp_index elif left.sp_index.equals(right.sp_index): with np.errstate(all="ignore"): result = op(left.sp_values, right.sp_values) fill = op(_get_fill(left), _get_fill(right)) index = left.sp_index else: if name[0] == "r": left, right = right, left name = name[1:] if name in ("and", "or", "xor") and dtype == "bool": opname = f"sparse_{name}_uint8" # to make template simple, cast here left_sp_values = left.sp_values.view(np.uint8) right_sp_values = right.sp_values.view(np.uint8) result_dtype = bool else: opname = f"sparse_{name}_{dtype}" left_sp_values = left.sp_values right_sp_values = right.sp_values if ( name in ["floordiv", "mod"] and (right == 0).any() and left.dtype.kind in ["i", "u"] ): # Match the non-Sparse Series behavior opname = f"sparse_{name}_float64" left_sp_values = left_sp_values.astype("float64") right_sp_values = right_sp_values.astype("float64") sparse_op = getattr(splib, opname) with np.errstate(all="ignore"): result, index, fill = sparse_op( left_sp_values, left.sp_index, left.fill_value, right_sp_values, right.sp_index, right.fill_value, ) if name == "divmod": # result is a 2-tuple # error: Incompatible return value type (got "Tuple[SparseArray, # SparseArray]", expected "SparseArray") return ( # type: ignore[return-value] _wrap_result(name, result[0], index, fill[0], dtype=result_dtype), _wrap_result(name, result[1], index, fill[1], dtype=result_dtype), ) if result_dtype is None: result_dtype = result.dtype return _wrap_result(name, result, index, fill, dtype=result_dtype)
Perform a binary operation between two arrays.

Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Callable
    The binary operation to perform.
name : str
    Name of the callable.

Returns
-------
SparseArray
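A minimal sketch of the behaviour this kernel implements, exercised through the public `pandas.arrays.SparseArray` constructor (that `+` on two SparseArrays reaches `_sparse_array_op` is assumed from the snippet above; the values in the comments are illustrative):

import numpy as np
import pandas as pd

# Two SparseArrays with different subtypes and fill values; the sparse op
# machinery finds a common subtype, applies the op to the fill values, and
# only computes the stored (non-fill) positions.
left = pd.arrays.SparseArray([0, 0, 1, 2], fill_value=0)
right = pd.arrays.SparseArray([0.0, 1.0, 0.0, 2.0], fill_value=0.0)

result = left + right
print(result.dtype)        # Sparse[float64, 0.0]
print(np.asarray(result))  # [0. 1. 1. 4.]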
173,312
from __future__ import annotations from collections import abc import numbers import operator from typing import ( TYPE_CHECKING, Any, Callable, Literal, Sequence, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib import pandas._libs.sparse as splib from pandas._libs.sparse import ( BlockIndex, IntIndex, SparseIndex, ) from pandas._libs.tslibs import NaT from pandas._typing import ( ArrayLike, AstypeArg, Axis, AxisInt, Dtype, NpDtype, PositionalIndexer, Scalar, ScalarIndexer, SequenceIndexer, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_bool_kwarg, validate_insert_loc, ) from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, find_common_type, maybe_box_datetimelike, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_dtype_equal, is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype, pandas_dtype, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core import ( arraylike, ops, ) import pandas.core.algorithms as algos from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray from pandas.core.arrays.sparse.dtype import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, ) from pandas.core.indexers import ( check_array_indexer, unpack_tuple_and_ellipses, ) from pandas.core.missing import interpolate_2d from pandas.core.nanops import check_below_min_count from pandas.io.formats import printing def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex: ... def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex: ... def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex: index: SparseIndex if kind == "block": locs, lens = splib.get_blocks(indices) index = BlockIndex(length, locs, lens) elif kind == "integer": index = IntIndex(length, indices) else: # pragma: no cover raise ValueError("must be block or integer type") return index def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: """ Cast array (ndarray or ExtensionArray) to the new dtype. Parameters ---------- values : ndarray or ExtensionArray dtype : dtype object copy : bool, default False copy if indicated Returns ------- ndarray or ExtensionArray """ if is_dtype_equal(values.dtype, dtype): if copy: return values.copy() return values if not isinstance(values, np.ndarray): # i.e. ExtensionArray values = values.astype(dtype, copy=copy) else: values = _astype_nansafe(values, dtype, copy=copy) # in pandas we don't store numpy str dtypes, so convert to object if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): values = np.array(values, dtype=object) return values def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. 
Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_string_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of the string dtype. If an array is passed with an object dtype, the elements must be inferred as strings. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the string dtype. Examples -------- >>> is_string_dtype(str) True >>> is_string_dtype(object) True >>> is_string_dtype(int) False >>> is_string_dtype(np.array(['a', 'b'])) True >>> is_string_dtype(pd.Series([1, 2])) False >>> is_string_dtype(pd.Series([1, 2], dtype=object)) False """ if hasattr(arr_or_dtype, "dtype") and get_dtype(arr_or_dtype).kind == "O": return is_all_strings(arr_or_dtype) def condition(dtype) -> bool: if is_string_or_object_np_dtype(dtype): return True try: return dtype == "string" except TypeError: return False return _is_dtype(arr_or_dtype, condition) def isna(obj: Scalar) -> bool: ... def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... 
def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res def na_value_for_dtype(dtype: DtypeObj, compat: bool = True): """ Return a dtype compat na value Parameters ---------- dtype : string / dtype compat : bool, default True Returns ------- np.dtype or a pandas dtype Examples -------- >>> na_value_for_dtype(np.dtype('int64')) 0 >>> na_value_for_dtype(np.dtype('int64'), compat=False) nan >>> na_value_for_dtype(np.dtype('float64')) nan >>> na_value_for_dtype(np.dtype('bool')) False >>> na_value_for_dtype(np.dtype('datetime64[ns]')) numpy.datetime64('NaT') """ if isinstance(dtype, ExtensionDtype): return dtype.na_value elif needs_i8_conversion(dtype): return dtype.type("NaT", "ns") elif is_float_dtype(dtype): return np.nan elif is_integer_dtype(dtype): if compat: return 0 return np.nan elif is_bool_dtype(dtype): if compat: return False return np.nan return np.nan def ensure_wrapped_if_datetimelike(arr): """ Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray. """ if isinstance(arr, np.ndarray): if arr.dtype.kind == "M": from pandas.core.arrays import DatetimeArray return DatetimeArray._from_sequence(arr) elif arr.dtype.kind == "m": from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(arr) return arr The provided code snippet includes necessary dependencies for implementing the `_make_sparse` function. 
Write a Python function `def _make_sparse( arr: np.ndarray, kind: SparseIndexKind = "block", fill_value=None, dtype: np.dtype | None = None, )` to solve the following problem:

Convert ndarray to sparse format

Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
dtype : np.dtype, optional

Returns
-------
(sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)

Here is the function:

def _make_sparse(
    arr: np.ndarray,
    kind: SparseIndexKind = "block",
    fill_value=None,
    dtype: np.dtype | None = None,
):
    """
    Convert ndarray to sparse format

    Parameters
    ----------
    arr : ndarray
    kind : {'block', 'integer'}
    fill_value : NaN or another value
    dtype : np.dtype, optional

    Returns
    -------
    (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
    """
    assert isinstance(arr, np.ndarray)

    if arr.ndim > 1:
        raise TypeError("expected dimension <= 1 data")

    if fill_value is None:
        fill_value = na_value_for_dtype(arr.dtype)

    if isna(fill_value):
        mask = notna(arr)
    else:
        # cast to object comparison to be safe
        if is_string_dtype(arr.dtype):
            arr = arr.astype(object)

        if is_object_dtype(arr.dtype):
            # element-wise equality in numpy does not distinguish element
            # types, e.g. 0, 0.0, and False compare equal, so we have to
            # check both the type and the value of each element.
            mask = splib.make_mask_object_ndarray(arr, fill_value)
        else:
            mask = arr != fill_value

    length = len(arr)
    if length != len(mask):
        # the arr is a SparseArray
        indices = mask.sp_index.indices
    else:
        indices = mask.nonzero()[0].astype(np.int32)

    index = make_sparse_index(length, indices, kind)
    sparsified_values = arr[mask]
    if dtype is not None:
        sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values)
        sparsified_values = astype_array(sparsified_values, dtype=dtype)
        sparsified_values = np.asarray(sparsified_values)

    # TODO: copy
    return sparsified_values, index, fill_value
Convert ndarray to sparse format

Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
dtype : np.dtype, optional

Returns
-------
(sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
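A short sketch of what `_make_sparse` produces, seen through the public `SparseArray` constructor that is assumed (from the snippet above) to call it; `sp_values`, `sp_index`, `fill_value`, and `npoints` are public attributes:

import numpy as np
import pandas as pd

arr = np.array([0.0, np.nan, 1.0, np.nan, 2.0])

# For float data the default fill_value is NaN, so only non-NaN entries are
# stored; kind="block" encodes their positions as runs (a BlockIndex).
sp = pd.arrays.SparseArray(arr, kind="block")

print(sp.sp_values)          # [0. 1. 2.]   -- the stored non-fill values
print(sp.sp_index)           # BlockIndex describing where those values live
print(sp.fill_value)         # nan
print(sp.npoints, len(sp))   # 3 stored points out of 5 logical elements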
173,313
from __future__ import annotations from typing import ( TYPE_CHECKING, Iterable, ) import numpy as np from pandas._libs import lib from pandas._typing import ( IndexLabel, npt, ) from pandas.core.dtypes.missing import notna from pandas.core.algorithms import factorize from pandas.core.indexes.api import MultiIndex from pandas.core.series import Series def _to_ijv( ss, row_levels: tuple[int] | list[int] = (0,), column_levels: tuple[int] | list[int] = (1,), sort_labels: bool = False, ) -> tuple[ np.ndarray, npt.NDArray[np.intp], npt.NDArray[np.intp], list[IndexLabel], list[IndexLabel], ]: """ For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo constructor, and ilabels and jlabels are the row and column labels respectively. Parameters ---------- ss : Series row_levels : tuple/list column_levels : tuple/list sort_labels : bool, default False Sort the row and column labels before forming the sparse matrix. When `row_levels` and/or `column_levels` refer to a single level, set to `True` for a faster execution. Returns ------- values : numpy.ndarray Valid values to populate a sparse matrix, extracted from ss. i_coords : numpy.ndarray (row coordinates of the values) j_coords : numpy.ndarray (column coordinates of the values) i_labels : list (row labels) j_labels : list (column labels) """ # index and column levels must be a partition of the index _check_is_partition([row_levels, column_levels], range(ss.index.nlevels)) # From the sparse Series, get the integer indices and data for valid sparse # entries. sp_vals = ss.array.sp_values na_mask = notna(sp_vals) values = sp_vals[na_mask] valid_ilocs = ss.array.sp_index.indices[na_mask] i_coords, i_labels = _levels_to_axis( ss, row_levels, valid_ilocs, sort_labels=sort_labels ) j_coords, j_labels = _levels_to_axis( ss, column_levels, valid_ilocs, sort_labels=sort_labels ) return values, i_coords, j_coords, i_labels, j_labels class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... IndexLabel = Union[Hashable, Sequence[Hashable]] class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. 
copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
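Examples
--------
For example (a minimal illustration), the most frequent value is returned,
and ties yield multiple rows in sorted order:

>>> s = pd.Series([2, 4, None, 2, 4, 2])
>>> s.mode()
0    2.0
dtype: float64

>>> pd.Series([2, 4, 2, 4]).mode()
0    2
1    4
dtype: int64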
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
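For example (this simply delegates to :meth:`Series.dot`):

>>> pd.Series([1, 2, 3]) @ pd.Series([4, 5, 6])
32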
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
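    # A minimal usage sketch for the ``any`` reduction declared above (the stub
    # only pins down the signature for type checkers; the behaviour shown is
    # the standard pandas semantics):
    #
    #   >>> pd.Series([False, False, True]).any()
    #   True
    #   >>> pd.Series([], dtype=bool).any()
    #   False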
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series The provided code snippet includes necessary dependencies for implementing the `sparse_series_to_coo` function. Write a Python function `def sparse_series_to_coo( ss: Series, row_levels: Iterable[int] = (0,), column_levels: Iterable[int] = (1,), sort_labels: bool = False, ) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]` to solve the following problem: Convert a sparse Series to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. 
Here is the function:
def sparse_series_to_coo(
    ss: Series,
    row_levels: Iterable[int] = (0,),
    column_levels: Iterable[int] = (1,),
    sort_labels: bool = False,
) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]:
    """
    Convert a sparse Series to a scipy.sparse.coo_matrix using index
    levels row_levels, column_levels as the row and column labels respectively.
    Returns the sparse_matrix, row and column labels.
    """
    import scipy.sparse

    if ss.index.nlevels < 2:
        raise ValueError("to_coo requires MultiIndex with nlevels >= 2.")
    if not ss.index.is_unique:
        raise ValueError(
            "Duplicate index entries are not allowed in to_coo transformation."
        )

    # to keep things simple, only rely on integer indexing (not labels)
    row_levels = [ss.index._get_level_number(x) for x in row_levels]
    column_levels = [ss.index._get_level_number(x) for x in column_levels]

    v, i, j, rows, columns = _to_ijv(
        ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
    )
    sparse_matrix = scipy.sparse.coo_matrix(
        (v, (i, j)), shape=(len(rows), len(columns))
    )
    return sparse_matrix, rows, columns
Convert a sparse Series to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels.
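For context, a brief usage sketch of this conversion (added for illustration, not part of the dataset entry): in practice the helper is reached through the public `Series.sparse.to_coo` accessor, and the index/value data below are assumptions chosen only to show the level-to-axis mapping.

import numpy as np
import pandas as pd

# Sparse Series with a two-level MultiIndex: level 0 -> rows, level 1 -> columns.
idx = pd.MultiIndex.from_tuples(
    [("a", "x"), ("a", "y"), ("b", "x"), ("b", "z")], names=["row", "col"]
)
ss = pd.Series([1.0, 2.0, 3.0, 4.0], index=idx).astype("Sparse[float64]")

# Returns the scipy.sparse.coo_matrix plus the row and column labels
# recovered from the chosen MultiIndex levels.
coo, rows, cols = ss.sparse.to_coo(row_levels=[0], column_levels=[1], sort_labels=True)
print(coo.todense())  # dense 2x3 view of the matrix
print(rows, cols)     # row labels and column labels from the MultiIndex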
173,314
from __future__ import annotations from typing import ( TYPE_CHECKING, Iterable, ) import numpy as np from pandas._libs import lib from pandas._typing import ( IndexLabel, npt, ) from pandas.core.dtypes.missing import notna from pandas.core.algorithms import factorize from pandas.core.indexes.api import MultiIndex from pandas.core.series import Series class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. 
""" _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." 
) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. 
if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). 
This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. """ arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. 
Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. 
""" return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. 
self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. 
test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
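        Note also that for datetime-like input the difference is a timedelta
        rather than a float; for example
        ``pd.Series(pd.to_datetime(["2024-01-01", "2024-01-03"])).diff()``
        returns ``NaT`` followed by ``Timedelta("2 days")`` with
        ``timedelta64[ns]`` dtype (illustrative note added here).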
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
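        A plain NumPy array also works as the right-hand operand; the operator
        simply delegates to :meth:`Series.dot` (illustrative doctest added
        here):

        >>> s = pd.Series([1, 2, 3])
        >>> s @ np.array([4, 5, 6])
        32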
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
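            For example (illustrative doctest added here), NA values keep their
            original position in the result but are assigned -1:

            >>> pd.Series([3.0, np.nan, 1.0]).argsort()
            0    1
            1   -1
            2    0
            dtype: int64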
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
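    # Illustrative sketch (editorial addition, not part of the pandas source):
    # the Notes section of ``Series.map`` above states that a dict subclass
    # defining ``__missing__`` supplies a default for unmatched values instead
    # of NaN.  A ``collections.defaultdict`` is the typical example:
    #
    #     >>> from collections import defaultdict
    #     >>> mapping = defaultdict(lambda: "unknown", {"cat": "kitten"})
    #     >>> pd.Series(["cat", "dog"]).map(mapping)
    #     0     kitten
    #     1    unknown
    #     dtype: object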
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series The provided code snippet includes necessary dependencies for implementing the `coo_to_sparse_series` function. Write a Python function `def coo_to_sparse_series( A: scipy.sparse.coo_matrix, dense_index: bool = False ) -> Series` to solve the following problem: Convert a scipy.sparse.coo_matrix to a Series with type sparse. Parameters ---------- A : scipy.sparse.coo_matrix dense_index : bool, default False Returns ------- Series Raises ------ TypeError if A is not a coo_matrix Here is the function: def coo_to_sparse_series( A: scipy.sparse.coo_matrix, dense_index: bool = False ) -> Series: """ Convert a scipy.sparse.coo_matrix to a Series with type sparse. 
Parameters ---------- A : scipy.sparse.coo_matrix dense_index : bool, default False Returns ------- Series Raises ------ TypeError if A is not a coo_matrix """ from pandas import SparseDtype try: ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False) except AttributeError as err: raise TypeError( f"Expected coo_matrix. Got {type(A).__name__} instead." ) from err ser = ser.sort_index() ser = ser.astype(SparseDtype(ser.dtype)) if dense_index: ind = MultiIndex.from_product([A.row, A.col]) ser = ser.reindex(ind) return ser
Convert a scipy.sparse.coo_matrix to a Series with type sparse. Parameters ---------- A : scipy.sparse.coo_matrix dense_index : bool, default False Returns ------- Series Raises ------ TypeError if A is not a coo_matrix
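A minimal usage sketch of the conversion above, using only public pandas/scipy API on a tiny hypothetical matrix; the helper itself is internal, and its public counterpart is `Series.sparse.from_coo`. This is an illustration, not the library's own code path.

import pandas as pd
from scipy import sparse

# Toy 3x4 COO matrix (hypothetical data).
A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4))

# Same steps as the helper: data keyed by (row, col), sorted, cast to a sparse dtype.
ser = pd.Series(A.data, index=pd.MultiIndex.from_arrays((A.row, A.col)))
ser = ser.sort_index().astype(pd.SparseDtype(ser.dtype))
print(ser)  # MultiIndexed sparse Series: (0, 2) -> 1.0, (0, 3) -> 2.0, (1, 0) -> 3.0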
173,315
from __future__ import annotations import numbers from typing import ( TYPE_CHECKING, Any, Callable, Mapping, TypeVar, ) import numpy as np from pandas._libs import ( lib, missing as libmissing, ) from pandas._typing import ( Dtype, DtypeObj, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( is_bool_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_string_dtype, pandas_dtype, ) from pandas.core.arrays.masked import ( BaseMaskedArray, BaseMaskedDtype, ) def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_string_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of the string dtype. If an array is passed with an object dtype, the elements must be inferred as strings. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the string dtype. Examples -------- >>> is_string_dtype(str) True >>> is_string_dtype(object) True >>> is_string_dtype(int) False >>> is_string_dtype(np.array(['a', 'b'])) True >>> is_string_dtype(pd.Series([1, 2])) False >>> is_string_dtype(pd.Series([1, 2], dtype=object)) False """ if hasattr(arr_or_dtype, "dtype") and get_dtype(arr_or_dtype).kind == "O": return is_all_strings(arr_or_dtype) def condition(dtype) -> bool: if is_string_or_object_np_dtype(dtype): return True try: return dtype == "string" except TypeError: return False return _is_dtype(arr_or_dtype, condition) def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. 
Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) def is_bool_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> from pandas.api.types import is_bool_dtype >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndex): # Allow Index[object] that is all-bools or Index["boolean"] return arr_or_dtype.inferred_type == "boolean" elif isinstance(dtype, ExtensionDtype): return getattr(dtype, "_is_boolean", False) return issubclass(dtype.type, np.bool_) def _coerce_to_data_and_mask(values, mask, dtype, copy, dtype_cls, default_dtype): checker = dtype_cls._checker inferred_type = None if dtype is None and hasattr(values, "dtype"): if checker(values.dtype): dtype = values.dtype if dtype is not None: dtype = dtype_cls._standardize_dtype(dtype) cls = dtype_cls.construct_array_type() if isinstance(values, cls): values, mask = values._data, values._mask if dtype is not None: values = values.astype(dtype.numpy_dtype, copy=False) if copy: values = values.copy() mask = mask.copy() return values, mask, dtype, inferred_type original = values values = np.array(values, copy=copy) inferred_type = None if is_object_dtype(values.dtype) or is_string_dtype(values.dtype): inferred_type = lib.infer_dtype(values, skipna=True) if inferred_type == "boolean" and dtype is None: name = dtype_cls.__name__.strip("_") raise TypeError(f"{values.dtype} cannot be converted to {name}") elif is_bool_dtype(values) and checker(dtype): values = np.array(values, dtype=default_dtype, copy=copy) elif not (is_integer_dtype(values) or is_float_dtype(values)): name = dtype_cls.__name__.strip("_") raise TypeError(f"{values.dtype} cannot be converted to {name}") if values.ndim != 1: raise TypeError("values must be a 1D list-like") if mask is None: if is_integer_dtype(values): # fastpath mask = np.zeros(len(values), dtype=np.bool_) else: mask = libmissing.is_numeric_na(values) else: assert len(mask) == len(values) if mask.ndim != 1: raise TypeError("mask must be a 1D list-like") # infer dtype if needed if dtype is None: dtype = default_dtype else: dtype = dtype.type if is_integer_dtype(dtype) and is_float_dtype(values.dtype) and len(values) > 0: if mask.all(): 
values = np.ones(values.shape, dtype=dtype) else: idx = np.nanargmax(values) if int(values[idx]) != original[idx]: # We have ints that lost precision during the cast. inferred_type = lib.infer_dtype(original, skipna=True) if ( inferred_type not in ["floating", "mixed-integer-float"] and not mask.any() ): values = np.array(original, dtype=dtype, copy=False) else: values = np.array(original, dtype="object", copy=False) # we copy as need to coerce here if mask.any(): values = values.copy() values[mask] = cls._internal_fill_value if inferred_type in ("string", "unicode"): # casts from str are always safe since they raise # a ValueError if the str cannot be parsed into a float values = values.astype(dtype, copy=copy) else: values = dtype_cls._safe_cast(values, dtype, copy=False) return values, mask, dtype, inferred_type
null
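The coercion helper above is internal machinery behind masked-array construction. A minimal sketch of the behavior it enables, via the public `pd.array` constructor with toy data, which is assumed here to route through a values/mask coercion of this kind:

import numpy as np
import pandas as pd

# Floats with a missing value coerced to a nullable integer array; the list input
# is split internally into an int64 data buffer and a boolean NA mask.
arr = pd.array([1.0, 2.0, np.nan], dtype="Int64")
print(arr)           # IntegerArray with values [1, 2, <NA>]
print(pd.isna(arr))  # [False False  True]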
173,316
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array DtypeObj = Union[np.dtype, "ExtensionDtype"] def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def needs_i8_conversion(arr_or_dtype) -> bool: """ Check whether the array or dtype should be converted to int64. An array-like or dtype "needs" such a conversion if the array-like or dtype is of a datetime-like dtype Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype should be converted to int64. Examples -------- >>> needs_i8_conversion(str) False >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) True >>> needs_i8_conversion(np.array(['a', 'b'])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) True >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True """ if arr_or_dtype is None: return False if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.kind in ["m", "M"] elif isinstance(arr_or_dtype, ExtensionDtype): return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype)) try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, np.dtype): return dtype.kind in ["m", "M"] return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool: # Bottleneck chokes on datetime64, PeriodDtype (or and EA) if not is_object_dtype(dtype) and not needs_i8_conversion(dtype): # GH 42878 # Bottleneck uses naive summation leading to O(n) loss of precision # unlike numpy which implements pairwise summation, which has O(log(n)) loss # crossref: https://github.com/pydata/bottleneck/issues/379 # GH 15507 # bottleneck does not properly upcast during the sum # so can overflow # GH 9422 # further we also want to preserve NaN when all elements # are NaN, unlike bottleneck/numpy which consider this # to be 0 return name not in ["nansum", "nanprod", "nanmean"] return False
null
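To illustrate the gating above, here is a self-contained restatement of the same check under a hypothetical name (`bn_ok_dtype`), using two dtype predicates from the internal pandas.core.dtypes.common module; a sketch for illustration, not the library's own entry point:

import numpy as np
from pandas.core.dtypes.common import is_object_dtype, needs_i8_conversion

def bn_ok_dtype(dtype, name: str) -> bool:
    # Same gating as the helper above: skip bottleneck for object and
    # datetime-like dtypes, and for the precision-sensitive reductions.
    if not is_object_dtype(dtype) and not needs_i8_conversion(dtype):
        return name not in ["nansum", "nanprod", "nanmean"]
    return False

print(bn_ok_dtype(np.dtype("float64"), "nanmax"))  # True
print(bn_ok_dtype(np.dtype("float64"), "nansum"))  # False: naive summation loses precision
print(bn_ok_dtype(np.dtype("M8[ns]"), "nanmax"))   # False: datetime64 not handled by bottleneck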
173,317
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _has_infs(result) -> bool: if isinstance(result, np.ndarray): if result.dtype in ("f8", "f4"): # Note: outside of an nanops-specific test, we always have # result.ndim == 1, so there is no risk of this ravel making a copy. return lib.has_infs(result.ravel("K")) try: return np.isinf(result).any() except (TypeError, NotImplementedError): # if it doesn't support infs, then it can't have infs return False
null
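A hedged usage sketch of the helper above; the import path `pandas.core.nanops._has_infs` is a private, internal name and is an assumption here:

import numpy as np
from pandas.core import nanops

print(nanops._has_infs(np.array([1.0, np.inf])))             # True
print(nanops._has_infs(np.array([1.0, 2.0])))                # False
print(nanops._has_infs(np.array(["a", "b"], dtype=object)))  # False: np.isinf raises, treated as "no infs"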
173,318
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime] AxisInt = int def is_numeric_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a numeric dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a numeric dtype. Examples -------- >>> from pandas.api.types import is_numeric_dtype >>> is_numeric_dtype(str) False >>> is_numeric_dtype(int) True >>> is_numeric_dtype(float) True >>> is_numeric_dtype(np.uint64) True >>> is_numeric_dtype(np.datetime64) False >>> is_numeric_dtype(np.timedelta64) False >>> is_numeric_dtype(np.array(['a', 'b'])) False >>> is_numeric_dtype(pd.Series([1, 2])) True >>> is_numeric_dtype(pd.Index([1, 2.])) True >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric ) def na_value_for_dtype(dtype: DtypeObj, compat: bool = True): """ Return a dtype compat na value Parameters ---------- dtype : string / dtype compat : bool, default True Returns ------- np.dtype or a pandas dtype Examples -------- >>> na_value_for_dtype(np.dtype('int64')) 0 >>> na_value_for_dtype(np.dtype('int64'), compat=False) nan >>> na_value_for_dtype(np.dtype('float64')) nan >>> na_value_for_dtype(np.dtype('bool')) False >>> na_value_for_dtype(np.dtype('datetime64[ns]')) numpy.datetime64('NaT') """ if isinstance(dtype, ExtensionDtype): return dtype.na_value elif needs_i8_conversion(dtype): return dtype.type("NaT", "ns") elif is_float_dtype(dtype): return np.nan elif is_integer_dtype(dtype): if compat: return 0 return np.nan elif is_bool_dtype(dtype): if compat: return False return np.nan return np.nan The provided code snippet includes necessary dependencies for implementing the `_na_for_min_count` function. Write a Python function `def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray` to solve the following problem: Return the missing value for `values`. Parameters ---------- values : ndarray axis : int or None axis for the reduction, required if values.ndim > 1. Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing. Here is the function: def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray: """ Return the missing value for `values`. 
Parameters ---------- values : ndarray axis : int or None axis for the reduction, required if values.ndim > 1. Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing. """ # we either return np.nan or pd.NaT if is_numeric_dtype(values): values = values.astype("float64") fill_value = na_value_for_dtype(values.dtype) if values.ndim == 1: return fill_value elif axis is None: return fill_value else: result_shape = values.shape[:axis] + values.shape[axis + 1 :] return np.full(result_shape, fill_value, dtype=values.dtype)
Return the missing value for `values`. Parameters ---------- values : ndarray axis : int or None axis for the reduction, required if values.ndim > 1. Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing.
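A hedged sketch of the documented behavior, assuming the helper is reachable as `pandas.core.nanops._na_for_min_count` (a private name that may move between versions):

import numpy as np
from pandas.core import nanops

# 1-D input: a single missing scalar of the right kind (nan for numeric data).
print(nanops._na_for_min_count(np.array([1, 2, 3]), axis=None))  # nan

# 2-D input with an axis: one missing value per remaining position.
vals = np.arange(6).reshape(2, 3)
print(nanops._na_for_min_count(vals, axis=0))                    # [nan nan nan]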
173,319
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... AxisInt = int F = TypeVar("F", bound=FuncType) The provided code snippet includes necessary dependencies for implementing the `maybe_operate_rowwise` function. Write a Python function `def maybe_operate_rowwise(func: F) -> F` to solve the following problem: NumPy operations on C-contiguous ndarrays with axis=1 can be very slow if axis 1 >> axis 0. Operate row-by-row and concatenate the results. Here is the function: def maybe_operate_rowwise(func: F) -> F: """ NumPy operations on C-contiguous ndarrays with axis=1 can be very slow if axis 1 >> axis 0. Operate row-by-row and concatenate the results. """ @functools.wraps(func) def newfunc(values: np.ndarray, *, axis: AxisInt | None = None, **kwargs): if ( axis == 1 and values.ndim == 2 and values.flags["C_CONTIGUOUS"] # only takes this path for wide arrays (long dataframes), for threshold see # https://github.com/pandas-dev/pandas/pull/43311#issuecomment-974891737 and (values.shape[1] / 1000) > values.shape[0] and values.dtype != object and values.dtype != bool ): arrs = list(values) if kwargs.get("mask") is not None: mask = kwargs.pop("mask") results = [ func(arrs[i], mask=mask[i], **kwargs) for i in range(len(arrs)) ] else: results = [func(x, **kwargs) for x in arrs] return np.array(results) return func(values, axis=axis, **kwargs) return cast(F, newfunc)
NumPy operations on C-contiguous ndarrays with axis=1 can be very slow if axis 1 >> axis 0. Operate row-by-row and concatenate the results.
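A minimal sketch of applying the decorator above to a toy reduction; the import path and the `row_sum` function are illustrative assumptions, not part of the library:

import numpy as np
from pandas.core.nanops import maybe_operate_rowwise

@maybe_operate_rowwise
def row_sum(values: np.ndarray, *, axis=None, **kwargs):
    # Toy reduction: sums along the requested axis.
    return values.sum(axis=axis)

wide = np.ones((2, 5000))     # axis 1 >> axis 0, C-contiguous float64
print(row_sum(wide, axis=1))  # dispatched row-by-row: [5000. 5000.]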
173,320
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value AxisInt = int def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def needs_i8_conversion(arr_or_dtype) -> bool: """ Check whether the array or dtype should be converted to int64. An array-like or dtype "needs" such a conversion if the array-like or dtype is of a datetime-like dtype Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype should be converted to int64. 
Examples -------- >>> needs_i8_conversion(str) False >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) True >>> needs_i8_conversion(np.array(['a', 'b'])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) True >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True """ if arr_or_dtype is None: return False if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.kind in ["m", "M"] elif isinstance(arr_or_dtype, ExtensionDtype): return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype)) try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, np.dtype): return dtype.kind in ["m", "M"] return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) The provided code snippet includes necessary dependencies for implementing the `nanany` function. Write a Python function `def nanany( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> bool` to solve the following problem: Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> from pandas.core import nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False Here is the function: def nanany( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> bool: """ Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> from pandas.core import nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False """ if needs_i8_conversion(values.dtype) and values.dtype.kind != "m": # GH#34479 warnings.warn( "'any' with datetime64 dtypes is deprecated and will raise in a " "future version. Use (obj != pd.Timestamp(0)).any() instead.", FutureWarning, stacklevel=find_stack_level(), ) values, _, _, _, _ = _get_values(values, skipna, fill_value=False, mask=mask) # For object type, any won't necessarily return # boolean values (numpy/numpy#4352) if is_object_dtype(values): values = values.astype(bool) # error: Incompatible return value type (got "Union[bool_, ndarray]", expected # "bool") return values.any(axis) # type: ignore[return-value]
Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> from pandas.core import nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False
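A hedged usage sketch mirroring the docstring examples above, calling the function through the internal pandas.core.nanops module:

import numpy as np
from pandas.core import nanops

print(nanops.nanany(np.array([0.0, np.nan, 2.0])))        # True
print(nanops.nanany(np.array([np.nan])))                  # False: NaN is filled with False
print(nanops.nanany(np.array([[0, 0], [0, 1]]), axis=1))  # [False  True]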
173,321
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value AxisInt = int def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def needs_i8_conversion(arr_or_dtype) -> bool: """ Check whether the array or dtype should be converted to int64. An array-like or dtype "needs" such a conversion if the array-like or dtype is of a datetime-like dtype Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype should be converted to int64. 
Examples -------- >>> needs_i8_conversion(str) False >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) True >>> needs_i8_conversion(np.array(['a', 'b'])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) True >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True """ if arr_or_dtype is None: return False if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.kind in ["m", "M"] elif isinstance(arr_or_dtype, ExtensionDtype): return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype)) try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, np.dtype): return dtype.kind in ["m", "M"] return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) The provided code snippet includes necessary dependencies for implementing the `nanall` function. Write a Python function `def nanall( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> bool` to solve the following problem: Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> from pandas.core import nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False Here is the function: def nanall( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> bool: """ Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> from pandas.core import nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False """ if needs_i8_conversion(values.dtype) and values.dtype.kind != "m": # GH#34479 warnings.warn( "'all' with datetime64 dtypes is deprecated and will raise in a " "future version. Use (obj != pd.Timestamp(0)).all() instead.", FutureWarning, stacklevel=find_stack_level(), ) values, _, _, _, _ = _get_values(values, skipna, fill_value=True, mask=mask) # For object type, all won't necessarily return # boolean values (numpy/numpy#4352) if is_object_dtype(values): values = values.astype(bool) # error: Incompatible return value type (got "Union[bool_, ndarray]", expected # "bool") return values.all(axis) # type: ignore[return-value]
Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> from pandas.core import nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False
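A hedged usage sketch mirroring the docstring examples above, again through the internal nanops module:

import numpy as np
from pandas.core import nanops

print(nanops.nanall(np.array([1.0, 2.0, np.nan])))                # True: NaN is filled with True
print(nanops.nanall(np.array([1.0, 0.0])))                        # False
print(nanops.nanall(np.array([[1.0, 2.0], [3.0, 0.0]]), axis=1))  # [ True False]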
173,322
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value def _maybe_null_out( result: np.ndarray | float | NaTType, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, shape: tuple[int, ...], min_count: int = 1, ) -> np.ndarray | float | NaTType: """ Returns ------- Dtype The product of all elements on a given axis. ( NaNs are treated as 1) """ if mask is None and min_count == 0: # nothing to check; short-circuit return result if axis is not None and isinstance(result, np.ndarray): if mask is not None: null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 else: # we have no nulls, kept mask=None in _maybe_get_mask below_count = shape[axis] - min_count < 0 new_shape = shape[:axis] + shape[axis + 1 :] null_mask = np.broadcast_to(below_count, new_shape) if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): result = result.astype("c16") elif not is_float_dtype(result): result = result.astype("f8", copy=False) result[null_mask] = np.nan else: # GH12941, use None to auto cast null result[null_mask] = None elif result is not NaT: if check_below_min_count(shape, mask, min_count): result_dtype = getattr(result, "dtype", None) if is_float_dtype(result_dtype): # error: Item "None" of "Optional[Any]" has no attribute "type" result = result_dtype.type("nan") # type: ignore[union-attr] else: result = np.nan return result AxisInt = int def is_timedelta64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the timedelta64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the timedelta64 dtype. Examples -------- >>> from pandas.core.dtypes.common import is_timedelta64_dtype >>> is_timedelta64_dtype(object) False >>> is_timedelta64_dtype(np.timedelta64) True >>> is_timedelta64_dtype([1, 2, 3]) False >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]")) True >>> is_timedelta64_dtype('0 days') False """ if isinstance(arr_or_dtype, np.dtype): # GH#33400 fastpath for dtype object return arr_or_dtype.kind == "m" return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. 
Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) The provided code snippet includes necessary dependencies for implementing the `nansum` function. Write a Python function `def nansum( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, min_count: int = 0, mask: npt.NDArray[np.bool_] | None = None, ) -> float` to solve the following problem: Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis : int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0 Here is the function: def nansum( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, min_count: int = 0, mask: npt.NDArray[np.bool_] | None = None, ) -> float: """ Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis : int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0 """ values, mask, dtype, dtype_max, _ = _get_values( values, skipna, fill_value=0, mask=mask ) dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): dtype_sum = np.dtype(np.float64) the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count) return the_sum
Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis : int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0
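A hedged sketch of the min_count interaction described above, via the internal nanops module:

import numpy as np
from pandas.core import nanops

vals = np.array([1.0, 2.0, np.nan])
print(nanops.nansum(vals))               # 3.0
print(nanops.nansum(vals, min_count=3))  # nan: only two non-NA values, below min_count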
173,323
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value def _get_counts( values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, dtype: np.dtype = np.dtype(np.float64), ) -> float | np.ndarray: """ Get the count of non-null values along an axis Parameters ---------- values_shape : tuple of int shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing axis : Optional[int] axis to count along dtype : type, optional type to use for count Returns ------- count : scalar or array """ if axis is None: if mask is not None: n = mask.size - mask.sum() else: n = np.prod(values_shape) return dtype.type(n) if mask is not None: count = mask.shape[axis] - mask.sum(axis) else: count = values_shape[axis] if is_scalar(count): return dtype.type(count) return count.astype(dtype, copy=False) def _ensure_numeric(x): if isinstance(x, np.ndarray): if is_integer_dtype(x) or is_bool_dtype(x): x = x.astype(np.float64) elif is_object_dtype(x): try: x = x.astype(np.complex128) except (TypeError, ValueError): try: x = x.astype(np.float64) except ValueError as err: # GH#29941 we get here with object arrays containing strs raise TypeError(f"Could not convert {x} to numeric") from err else: if not np.any(np.imag(x)): x = x.real elif not (is_float(x) or is_integer(x) or is_complex(x)): try: x = float(x) except (TypeError, ValueError): # e.g. "1+1j" or "foo" try: x = complex(x) except ValueError as err: # e.g. "foo" raise TypeError(f"Could not convert {x} to numeric") from err return x def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... AxisInt = int def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. 
Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) The provided code snippet includes necessary dependencies for implementing the `nanmean` function. Write a Python function `def nanmean( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> float` to solve the following problem: Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5 Here is the function: def nanmean( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> float: """ Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- float Unless input is a float array, in which case use the same precision as the input array. 
Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5 """ values, mask, dtype, dtype_max, _ = _get_values( values, skipna, fill_value=0, mask=mask ) dtype_sum = dtype_max dtype_count = np.dtype(np.float64) # not using needs_i8_conversion because that includes period if dtype.kind in ["m", "M"]: dtype_sum = np.dtype(np.float64) elif is_integer_dtype(dtype): dtype_sum = np.dtype(np.float64) elif is_float_dtype(dtype): dtype_sum = dtype dtype_count = dtype count = _get_counts(values.shape, mask, axis, dtype=dtype_count) the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) if axis is not None and getattr(the_sum, "ndim", False): count = cast(np.ndarray, count) with np.errstate(all="ignore"): # suppress division by zero warnings the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): the_mean[ct_mask] = np.nan else: the_mean = the_sum / count if count > 0 else np.nan return the_mean
Compute the mean of the element along an axis ignoring NaNs

Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
float
    Unless input is a float array, in which case use the same
    precision as the input array.

Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanmean(s)
1.5
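As an illustrative aside to the record above, a minimal sketch of nanmean's skipna behaviour on a plain ndarray (the input values are assumed for demonstration; only numpy and pandas.core.nanops are used, both already imported in the prompt):

import numpy as np
from pandas.core import nanops

arr = np.array([1.0, 2.0, np.nan, 4.0])
nanops.nanmean(arr)                # 2.333... -- the NaN is masked, so the count is 3
nanops.nanmean(arr, skipna=False)  # nan -- the NaN propagates into the sum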
173,324
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value def _wrap_results(result, dtype: np.dtype, fill_value=None): """wrap our results if needed""" if result is NaT: pass elif is_datetime64_any_dtype(dtype): if fill_value is None: # GH#24293 fill_value = iNaT if not isinstance(result, np.ndarray): assert not isna(fill_value), "Expected non-null fill_value" if result == fill_value: result = np.nan if isna(result): result = np.datetime64("NaT", "ns").astype(dtype) else: result = np.int64(result).view(dtype) # retain original unit result = result.astype(dtype, copy=False) else: # If we have float dtype, taking a view will give the wrong result result = result.astype(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): if result == fill_value or np.isnan(result): result = np.timedelta64("NaT").astype(dtype) elif np.fabs(result) > lib.i8max: # raise if we have a timedelta64[ns] which is too large raise ValueError("overflow in timedelta operation") else: # return a timedelta64 with the original unit result = np.int64(result).astype(dtype, copy=False) else: result = result.astype("m8[ns]").view(dtype) return result def get_empty_reduction_result( shape: tuple[int, ...], axis: AxisInt, dtype: np.dtype | type[np.floating], fill_value: Any, ) -> np.ndarray: """ The result from a reduction on an empty ndarray. Parameters ---------- shape : Tuple[int] axis : int dtype : np.dtype fill_value : Any Returns ------- np.ndarray """ shp = np.array(shape) dims = np.arange(len(shape)) ret = np.empty(shp[dims != axis], dtype=dtype) ret.fill(fill_value) return ret AxisInt = int def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. 
Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res The provided code snippet includes necessary dependencies for implementing the `nanmedian` function. Write a Python function `def nanmedian(values, *, axis: AxisInt | None = None, skipna: bool = True, mask=None)` to solve the following problem: Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. 
Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 2]) >>> nanops.nanmedian(s) 2.0 Here is the function: def nanmedian(values, *, axis: AxisInt | None = None, skipna: bool = True, mask=None): """ Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 2]) >>> nanops.nanmedian(s) 2.0 """ def get_median(x, _mask=None): if _mask is None: _mask = notna(x) else: _mask = ~_mask if not skipna and not _mask.all(): return np.nan with warnings.catch_warnings(): # Suppress RuntimeWarning about All-NaN slice warnings.filterwarnings( "ignore", "All-NaN slice encountered", RuntimeWarning ) res = np.nanmedian(x[_mask]) return res values, mask, dtype, _, _ = _get_values(values, skipna, mask=mask, fill_value=0) if not is_float_dtype(values.dtype): try: values = values.astype("f8") except ValueError as err: # e.g. "could not convert string to float: 'a'" raise TypeError(str(err)) from err if mask is not None: values[mask] = np.nan notempty = values.size # an array from a frame if values.ndim > 1 and axis is not None: # there's a non-empty array to apply over otherwise numpy raises if notempty: if not skipna: res = np.apply_along_axis(get_median, axis, values) else: # fastpath for the skipna case with warnings.catch_warnings(): # Suppress RuntimeWarning about All-NaN slice warnings.filterwarnings( "ignore", "All-NaN slice encountered", RuntimeWarning ) res = np.nanmedian(values, axis) else: # must return the correct shape, but median is not defined for the # empty set so return nans of shape "everything but the passed axis" # since "axis" is where the reduction would occur if we had a nonempty # array res = get_empty_reduction_result(values.shape, axis, np.float_, np.nan) else: # otherwise return a scalar value res = get_median(values, mask) if notempty else np.nan return _wrap_results(res, dtype)
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
result : float
    Unless input is a float array, in which case use the same
    precision as the input array.

Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 2])
>>> nanops.nanmedian(s)
2.0
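A short sketch of the 2-D / axis path described in the record above (the array is a hypothetical example, not part of the record); with skipna=True the per-axis reduction dispatches to np.nanmedian:

import numpy as np
from pandas.core import nanops

arr = np.array([[1.0, np.nan, 3.0],
                [4.0, 5.0, np.nan]])
nanops.nanmedian(arr, axis=1)  # array([2. , 4.5]) -- per-row median, NaNs skipped
nanops.nanmedian(arr, axis=0)  # array([2.5, 5. , 3. ]) -- per-column median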
173,325
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value def _wrap_results(result, dtype: np.dtype, fill_value=None): """wrap our results if needed""" if result is NaT: pass elif is_datetime64_any_dtype(dtype): if fill_value is None: # GH#24293 fill_value = iNaT if not isinstance(result, np.ndarray): assert not isna(fill_value), "Expected non-null fill_value" if result == fill_value: result = np.nan if isna(result): result = np.datetime64("NaT", "ns").astype(dtype) else: result = np.int64(result).view(dtype) # retain original unit result = result.astype(dtype, copy=False) else: # If we have float dtype, taking a view will give the wrong result result = result.astype(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): if result == fill_value or np.isnan(result): result = np.timedelta64("NaT").astype(dtype) elif np.fabs(result) > lib.i8max: # raise if we have a timedelta64[ns] which is too large raise ValueError("overflow in timedelta operation") else: # return a timedelta64 with the original unit result = np.int64(result).astype(dtype, copy=False) else: result = result.astype("m8[ns]").view(dtype) return result def nanvar( values, *, axis: AxisInt | None = None, skipna: bool = True, ddof: int = 1, mask=None, ): """ Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. 
Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0 """ values = extract_array(values, extract_numpy=True) dtype = values.dtype mask = _maybe_get_mask(values, skipna, mask) if is_any_int_dtype(dtype): values = values.astype("f8") if mask is not None: values[mask] = np.nan if is_float_dtype(values.dtype): count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(values.shape, mask, axis, ddof) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) # xref GH10242 # Compute variance via two-pass algorithm, which is stable against # cancellation errors and relatively accurate for small numbers of # observations. # # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) if mask is not None: np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d # Return variance as np.float64 (the datatype used in the accumulator), # unless we were dealing with a float array, in which case use the same # precision as the original values array. if is_float_dtype(dtype): result = result.astype(dtype, copy=False) return result AxisInt = int The provided code snippet includes necessary dependencies for implementing the `nanstd` function. Write a Python function `def nanstd( values, *, axis: AxisInt | None = None, skipna: bool = True, ddof: int = 1, mask=None, )` to solve the following problem: Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0 Here is the function: def nanstd( values, *, axis: AxisInt | None = None, skipna: bool = True, ddof: int = 1, mask=None, ): """ Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0 """ if values.dtype == "M8[ns]": values = values.view("m8[ns]") orig_dtype = values.dtype values, mask, _, _, _ = _get_values(values, skipna, mask=mask) result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)) return _wrap_results(result, orig_dtype)
Compute the standard deviation along given axis while ignoring NaNs

Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
    Delta Degrees of Freedom. The divisor used in calculations is
    N - ddof, where N represents the number of elements.
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
result : float
    Unless input is a float array, in which case use the same
    precision as the input array.

Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanstd(s)
1.0
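A minimal sketch of the ddof parameter on assumed example values; only the three non-NaN elements enter the divisor N - ddof:

import numpy as np
from pandas.core import nanops

arr = np.array([1.0, np.nan, 2.0, 3.0])
nanops.nanstd(arr)          # 1.0 -- sample std, divisor N - 1 = 2
nanops.nanstd(arr, ddof=0)  # 0.816... -- population std, divisor N = 3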
173,326
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _maybe_get_mask( values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None ) -> npt.NDArray[np.bool_] | None: """ Compute a mask if and only if necessary. This function will compute a mask iff it is necessary. Otherwise, return the provided mask (potentially None) when a mask does not need to be computed. A mask is never necessary if the values array is of boolean or integer dtypes, as these are incapable of storing NaNs. If passing a NaN-capable dtype that is interpretable as either boolean or integer data (eg, timedelta64), a mask must be provided. If the skipna parameter is False, a new mask will not be computed. The mask is computed using isna() by default. Setting invert=True selects notna() as the masking function. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped mask : Optional[ndarray] nan-mask if known Returns ------- Optional[np.ndarray[bool]] """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): # Boolean data cannot contain nulls, so signal via mask being None return None if skipna or needs_i8_conversion(values.dtype): mask = isna(values) return mask def _get_counts_nanvar( values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, ddof: int, dtype: np.dtype = np.dtype(np.float64), ) -> tuple[float | np.ndarray, float | np.ndarray]: """ Get the count of non-null values along an axis, accounting for degrees of freedom. Parameters ---------- values_shape : Tuple[int, ...] shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing axis : Optional[int] axis to count along ddof : int degrees of freedom dtype : type, optional type to use for count Returns ------- count : int, np.nan or np.ndarray d : int, np.nan or np.ndarray """ count = _get_counts(values_shape, mask, axis, dtype=dtype) d = count - dtype.type(ddof) # always return NaN, never inf if is_scalar(count): if count <= ddof: count = np.nan d = np.nan else: # count is not narrowed by is_scalar check count = cast(np.ndarray, count) mask = count <= ddof if mask.any(): np.putmask(d, mask, np.nan) np.putmask(count, mask, np.nan) return count, d def nanvar( values, *, axis: AxisInt | None = None, skipna: bool = True, ddof: int = 1, mask=None, ): """ Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. 
The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0 """ values = extract_array(values, extract_numpy=True) dtype = values.dtype mask = _maybe_get_mask(values, skipna, mask) if is_any_int_dtype(dtype): values = values.astype("f8") if mask is not None: values[mask] = np.nan if is_float_dtype(values.dtype): count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(values.shape, mask, axis, ddof) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) # xref GH10242 # Compute variance via two-pass algorithm, which is stable against # cancellation errors and relatively accurate for small numbers of # observations. # # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) if mask is not None: np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d # Return variance as np.float64 (the datatype used in the accumulator), # unless we were dealing with a float array, in which case use the same # precision as the original values array. if is_float_dtype(dtype): result = result.astype(dtype, copy=False) return result AxisInt = int def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) The provided code snippet includes necessary dependencies for implementing the `nansem` function. Write a Python function `def nansem( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, ddof: int = 1, mask: npt.NDArray[np.bool_] | None = None, ) -> float` to solve the following problem: Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. 
Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s) 0.5773502691896258 Here is the function: def nansem( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, ddof: int = 1, mask: npt.NDArray[np.bool_] | None = None, ) -> float: """ Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s) 0.5773502691896258 """ # This checks if non-numeric-like data is passed with numeric_only=False # and raises a TypeError otherwise nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask) mask = _maybe_get_mask(values, skipna, mask) if not is_float_dtype(values.dtype): values = values.astype("f8") if not skipna and mask is not None and mask.any(): return np.nan count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask) return np.sqrt(var) / np.sqrt(count)
Compute the standard error in the mean along given axis while ignoring NaNs

Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
    Delta Degrees of Freedom. The divisor used in calculations is
    N - ddof, where N represents the number of elements.
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
result : float64
    Unless input is a float array, in which case use the same
    precision as the input array.

Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nansem(s)
0.5773502691896258
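As a sanity check on how nansem relates to nanstd (an illustrative sketch with assumed data, not taken from the record): the standard error is the standard deviation divided by the square root of the non-NaN count.

import numpy as np
from pandas.core import nanops

arr = np.array([1.0, np.nan, 2.0, 3.0])
nanops.nansem(arr)               # 0.5773502691896258
nanops.nanstd(arr) / np.sqrt(3)  # same value: std over the 3 non-NaN points / sqrt(count)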
173,327
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array class bottleneck_switch: def __init__(self, name=None, **kwargs) -> None: def __call__(self, alt: F) -> F: def f( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, **kwds, ): def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: def _datetimelike_compat(func: F) -> F: def _maybe_null_out( result: np.ndarray | float | NaTType, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, shape: tuple[int, ...], min_count: int = 1, ) -> np.ndarray | float | NaTType: AxisInt = int Dtype = Union["ExtensionDtype", NpDtype] def _nanminmax(meth, fill_value_typ): @bottleneck_switch(name=f"nan{meth}") @_datetimelike_compat def reduction( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> Dtype: values, mask, dtype, dtype_max, fill_value = _get_values( values, skipna, fill_value_typ=fill_value_typ, mask=mask ) if (axis is not None and values.shape[axis] == 0) or values.size == 0: try: result = getattr(values, meth)(axis, dtype=dtype_max) result.fill(np.nan) except (AttributeError, TypeError, ValueError): result = np.nan else: result = getattr(values, meth)(axis) result = _maybe_null_out(result, axis, mask, values.shape) return result return reduction
null
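For context on the record above: the `_nanminmax` factory is what produces the module-level min/max reductions (in the pandas source they are bound roughly as `nanmin = _nanminmax("min", fill_value_typ="+inf")` and `nanmax = _nanminmax("max", fill_value_typ="-inf")`; this binding is recalled here, not shown in the record). A small usage sketch with assumed data:

import numpy as np
from pandas.core import nanops

# NaNs are filled with +/-inf before the ndarray min/max is taken,
# then masked back out of the result.
arr = np.array([1.0, np.nan, 3.0])
nanops.nanmin(arr)  # 1.0
nanops.nanmax(arr)  # 3.0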
173,328
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value def _maybe_arg_null_out( result: np.ndarray, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, skipna: bool, ) -> np.ndarray | int: # helper function for nanargmin/nanargmax if mask is None: return result if axis is None or not getattr(result, "ndim", False): if skipna: if mask.all(): return -1 else: if mask.any(): return -1 else: if skipna: na_mask = mask.all(axis) else: na_mask = mask.any(axis) if na_mask.any(): result[na_mask] = -1 return result AxisInt = int The provided code snippet includes necessary dependencies for implementing the `nanargmax` function. 
Write a Python function `def nanargmax( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> int | np.ndarray` to solve the following problem: Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int or ndarray[int] The index/indices of max value in specified axis or -1 in the NA case Examples -------- >>> from pandas.core import nanops >>> arr = np.array([1, 2, 3, np.nan, 4]) >>> nanops.nanargmax(arr) 4 >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) >>> arr[2:, 2] = np.nan >>> arr array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., nan], [ 9., 10., nan]]) >>> nanops.nanargmax(arr, axis=1) array([2, 2, 1, 1]) Here is the function: def nanargmax( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> int | np.ndarray: """ Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int or ndarray[int] The index/indices of max value in specified axis or -1 in the NA case Examples -------- >>> from pandas.core import nanops >>> arr = np.array([1, 2, 3, np.nan, 4]) >>> nanops.nanargmax(arr) 4 >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) >>> arr[2:, 2] = np.nan >>> arr array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., nan], [ 9., 10., nan]]) >>> nanops.nanargmax(arr, axis=1) array([2, 2, 1, 1]) """ values, mask, _, _, _ = _get_values(values, True, fill_value_typ="-inf", mask=mask) # error: Need type annotation for 'result' result = values.argmax(axis) # type: ignore[var-annotated] result = _maybe_arg_null_out(result, axis, mask, skipna) return result
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
result : int or ndarray[int]
    The index/indices of max value in specified axis or -1 in the NA case

Examples
--------
>>> from pandas.core import nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmax(arr)
4
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 2] = np.nan
>>> arr
array([[ 0.,  1.,  2.],
       [ 3.,  4.,  5.],
       [ 6.,  7., nan],
       [ 9., 10., nan]])
>>> nanops.nanargmax(arr, axis=1)
array([2, 2, 1, 1])
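A brief sketch of the -1 sentinel behaviour mentioned in the Returns section above (illustrative input values):

import numpy as np
from pandas.core import nanops

arr = np.array([1.0, np.nan, 3.0])
nanops.nanargmax(arr)                # 2 -- the NaN is masked before argmax
nanops.nanargmax(arr, skipna=False)  # -1 -- any NA present forces the sentinel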
173,329
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _get_values( values: np.ndarray, skipna: bool, fill_value: Any = None, fill_value_typ: str | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None, np.dtype, np.dtype, Any]: """ Utility to get the values view, mask, dtype, dtype_max, and fill_value. If both mask and fill_value/fill_value_typ are not None and skipna is True, the values array will be copied. For input arrays of boolean or integer dtypes, copies will only occur if a precomputed mask, a fill_value/fill_value_typ, and skipna=True are provided. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped fill_value : Any value to fill NaNs with fill_value_typ : str Set to '+inf' or '-inf' to handle dtype-specific infinities mask : Optional[np.ndarray[bool]] nan-mask if known Returns ------- values : ndarray Potential copy of input value array mask : Optional[ndarray[bool]] Mask for values, if deemed necessary to compute dtype : np.dtype dtype for values dtype_max : np.dtype platform independent dtype fill_value : Any fill value used """ # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # np.where call below assert is_scalar(fill_value) # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if needs_i8_conversion(values.dtype): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = np.asarray(values.view("i8")) datetimelike = True dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value( dtype, fill_value=fill_value, fill_value_typ=fill_value_typ ) if skipna and (mask is not None) and (fill_value is not None): if mask.any(): if dtype_ok or datetimelike: values = values.copy() np.putmask(values, mask, fill_value) else: # np.where will promote if needed values = np.where(~mask, values, fill_value) # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.dtype(np.int64) elif is_float_dtype(dtype): dtype_max = np.dtype(np.float64) return values, mask, dtype, dtype_max, fill_value def _maybe_arg_null_out( result: np.ndarray, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, skipna: bool, ) -> np.ndarray | int: # helper function for nanargmin/nanargmax if mask is None: return result if axis is None or not getattr(result, "ndim", False): if skipna: if mask.all(): return -1 else: if mask.any(): return -1 else: if skipna: na_mask = mask.all(axis) else: na_mask = mask.any(axis) if na_mask.any(): result[na_mask] = -1 return result AxisInt = int The provided code snippet includes necessary dependencies for implementing the `nanargmin` function. 
Write a Python function `def nanargmin( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> int | np.ndarray` to solve the following problem: Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int or ndarray[int] The index/indices of min value in specified axis or -1 in the NA case Examples -------- >>> from pandas.core import nanops >>> arr = np.array([1, 2, 3, np.nan, 4]) >>> nanops.nanargmin(arr) 0 >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) >>> arr[2:, 0] = np.nan >>> arr array([[ 0., 1., 2.], [ 3., 4., 5.], [nan, 7., 8.], [nan, 10., 11.]]) >>> nanops.nanargmin(arr, axis=1) array([0, 0, 1, 1]) Here is the function: def nanargmin( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> int | np.ndarray: """ Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int or ndarray[int] The index/indices of min value in specified axis or -1 in the NA case Examples -------- >>> from pandas.core import nanops >>> arr = np.array([1, 2, 3, np.nan, 4]) >>> nanops.nanargmin(arr) 0 >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) >>> arr[2:, 0] = np.nan >>> arr array([[ 0., 1., 2.], [ 3., 4., 5.], [nan, 7., 8.], [nan, 10., 11.]]) >>> nanops.nanargmin(arr, axis=1) array([0, 0, 1, 1]) """ values, mask, _, _, _ = _get_values(values, True, fill_value_typ="+inf", mask=mask) # error: Need type annotation for 'result' result = values.argmin(axis) # type: ignore[var-annotated] result = _maybe_arg_null_out(result, axis, mask, skipna) return result
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
result : int or ndarray[int]
    The index/indices of min value in specified axis or -1 in the NA case

Examples
--------
>>> from pandas.core import nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmin(arr)
0
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 0] = np.nan
>>> arr
array([[ 0.,  1.,  2.],
       [ 3.,  4.,  5.],
       [nan,  7.,  8.],
       [nan, 10., 11.]])
>>> nanops.nanargmin(arr, axis=1)
array([0, 0, 1, 1])
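Similarly for the all-NA case (hypothetical inputs; with skipna=True the sentinel is only returned when every value along the reduction is missing):

import numpy as np
from pandas.core import nanops

nanops.nanargmin(np.array([np.nan, np.nan]))       # -1 -- nothing to index
nanops.nanargmin(np.array([np.nan, 2.0, np.nan]))  # 1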
173,330
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _maybe_get_mask( values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None ) -> npt.NDArray[np.bool_] | None: """ Compute a mask if and only if necessary. This function will compute a mask iff it is necessary. Otherwise, return the provided mask (potentially None) when a mask does not need to be computed. A mask is never necessary if the values array is of boolean or integer dtypes, as these are incapable of storing NaNs. If passing a NaN-capable dtype that is interpretable as either boolean or integer data (eg, timedelta64), a mask must be provided. If the skipna parameter is False, a new mask will not be computed. The mask is computed using isna() by default. Setting invert=True selects notna() as the masking function. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped mask : Optional[ndarray] nan-mask if known Returns ------- Optional[np.ndarray[bool]] """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): # Boolean data cannot contain nulls, so signal via mask being None return None if skipna or needs_i8_conversion(values.dtype): mask = isna(values) return mask def _get_counts( values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, dtype: np.dtype = np.dtype(np.float64), ) -> float | np.ndarray: """ Get the count of non-null values along an axis Parameters ---------- values_shape : tuple of int shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing axis : Optional[int] axis to count along dtype : type, optional type to use for count Returns ------- count : scalar or array """ if axis is None: if mask is not None: n = mask.size - mask.sum() else: n = np.prod(values_shape) return dtype.type(n) if mask is not None: count = mask.shape[axis] - mask.sum(axis) else: count = values_shape[axis] if is_scalar(count): return dtype.type(count) return count.astype(dtype, copy=False) def _zero_out_fperr(arg): # #18044 reference this behavior to fix rolling skew/kurt issue if isinstance(arg, np.ndarray): with np.errstate(invalid="ignore"): return np.where(np.abs(arg) < 1e-14, 0, arg) else: return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg AxisInt = int def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. 
Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) def extract_array( obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... ) -> ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = ..., extract_range: bool = ... ) -> T | ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = False, extract_range: bool = False ) -> T | ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray. extract_range : bool, default False If we have a RangeIndex, return range._values if True (which is a materialized integer ndarray), otherwise return unchanged. Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index the ndarray is returned. >>> extract_array(pd.Series([1, 2, 3])) array([1, 2, 3]) To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndex, ABCSeries)): if isinstance(obj, ABCRangeIndex): if extract_range: return obj._values # https://github.com/python/mypy/issues/1081 # error: Incompatible return value type (got "RangeIndex", expected # "Union[T, Union[ExtensionArray, ndarray[Any, Any]]]") return obj # type: ignore[return-value] return obj._values elif extract_numpy and isinstance(obj, ABCPandasArray): return obj.to_numpy() return obj The provided code snippet includes necessary dependencies for implementing the `nanskew` function. Write a Python function `def nanskew( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> float` to solve the following problem: Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G1. The algorithm computes this coefficient directly from the second and third central moment. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 1, 2]) >>> nanops.nanskew(s) 1.7320508075688787 Here is the function: def nanskew( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> float: """ Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G1. The algorithm computes this coefficient directly from the second and third central moment. 
Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 1, 2]) >>> nanops.nanskew(s) 1.7320508075688787 """ # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) if not is_float_dtype(values.dtype): values = values.astype("f8") count = _get_counts(values.shape, mask, axis) else: count = _get_counts(values.shape, mask, axis, dtype=values.dtype) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) elif not skipna and mask is not None and mask.any(): return np.nan mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna and mask is not None: np.putmask(adjusted, mask, 0) adjusted2 = adjusted**2 adjusted3 = adjusted2 * adjusted m2 = adjusted2.sum(axis, dtype=np.float64) m3 = adjusted3.sum(axis, dtype=np.float64) # floating point error # # #18044 in _libs/windows.pyx calc_skew follow this behavior # to fix the fperr to treat m2 <1e-14 as zero m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) with np.errstate(invalid="ignore", divide="ignore"): result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2**1.5) dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype, copy=False) if isinstance(result, np.ndarray): result = np.where(m2 == 0, 0, result) result[count < 3] = np.nan else: result = 0 if m2 == 0 else result if count < 3: return np.nan return result
Compute the sample skewness.

The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G1. The algorithm computes this coefficient directly
from the second and third central moment.

Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
result : float64
    Unless input is a float array, in which case use the same
    precision as the input array.

Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 1, 2])
>>> nanops.nanskew(s)
1.7320508075688787
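A hand verification of the G1 formula used above, on the same data as the docstring example (a sketch only; the trailing digits may differ from nanops.nanskew by floating-point rounding):

import numpy as np

x = np.array([1.0, 1.0, 2.0])     # the non-NaN values of [1, nan, 1, 2]
n = x.size
m2 = ((x - x.mean()) ** 2).sum()  # sum of squared deviations
m3 = ((x - x.mean()) ** 3).sum()  # sum of cubed deviations
g1 = (n * (n - 1) ** 0.5 / (n - 2)) * (m3 / m2 ** 1.5)
# g1 is approximately 1.7320508 (i.e. sqrt(3)), matching nanops.nanskew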
173,331
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _maybe_get_mask( values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None ) -> npt.NDArray[np.bool_] | None: """ Compute a mask if and only if necessary. This function will compute a mask iff it is necessary. Otherwise, return the provided mask (potentially None) when a mask does not need to be computed. A mask is never necessary if the values array is of boolean or integer dtypes, as these are incapable of storing NaNs. If passing a NaN-capable dtype that is interpretable as either boolean or integer data (eg, timedelta64), a mask must be provided. If the skipna parameter is False, a new mask will not be computed. The mask is computed using isna() by default. Setting invert=True selects notna() as the masking function. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped mask : Optional[ndarray] nan-mask if known Returns ------- Optional[np.ndarray[bool]] """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): # Boolean data cannot contain nulls, so signal via mask being None return None if skipna or needs_i8_conversion(values.dtype): mask = isna(values) return mask def _get_counts( values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, dtype: np.dtype = np.dtype(np.float64), ) -> float | np.ndarray: """ Get the count of non-null values along an axis Parameters ---------- values_shape : tuple of int shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing axis : Optional[int] axis to count along dtype : type, optional type to use for count Returns ------- count : scalar or array """ if axis is None: if mask is not None: n = mask.size - mask.sum() else: n = np.prod(values_shape) return dtype.type(n) if mask is not None: count = mask.shape[axis] - mask.sum(axis) else: count = values_shape[axis] if is_scalar(count): return dtype.type(count) return count.astype(dtype, copy=False) def _zero_out_fperr(arg): # #18044 reference this behavior to fix rolling skew/kurt issue if isinstance(arg, np.ndarray): with np.errstate(invalid="ignore"): return np.where(np.abs(arg) < 1e-14, 0, arg) else: return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg AxisInt = int def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. 
Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) def extract_array( obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... ) -> ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = ..., extract_range: bool = ... ) -> T | ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = False, extract_range: bool = False ) -> T | ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray. extract_range : bool, default False If we have a RangeIndex, return range._values if True (which is a materialized integer ndarray), otherwise return unchanged. Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index the ndarray is returned. >>> extract_array(pd.Series([1, 2, 3])) array([1, 2, 3]) To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndex, ABCSeries)): if isinstance(obj, ABCRangeIndex): if extract_range: return obj._values # https://github.com/python/mypy/issues/1081 # error: Incompatible return value type (got "RangeIndex", expected # "Union[T, Union[ExtensionArray, ndarray[Any, Any]]]") return obj # type: ignore[return-value] return obj._values elif extract_numpy and isinstance(obj, ABCPandasArray): return obj.to_numpy() return obj The provided code snippet includes necessary dependencies for implementing the `nankurt` function. Write a Python function `def nankurt( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> float` to solve the following problem: Compute the sample excess kurtosis The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G2, computed directly from the second and fourth central moment. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 1, 3, 2]) >>> nanops.nankurt(s) -1.2892561983471076 Here is the function: def nankurt( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None, ) -> float: """ Compute the sample excess kurtosis The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G2, computed directly from the second and fourth central moment. 
Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 1, 3, 2]) >>> nanops.nankurt(s) -1.2892561983471076 """ # error: Incompatible types in assignment (expression has type "Union[Any, # Union[ExtensionArray, ndarray]]", variable has type "ndarray") values = extract_array(values, extract_numpy=True) # type: ignore[assignment] mask = _maybe_get_mask(values, skipna, mask) if not is_float_dtype(values.dtype): values = values.astype("f8") count = _get_counts(values.shape, mask, axis) else: count = _get_counts(values.shape, mask, axis, dtype=values.dtype) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) elif not skipna and mask is not None and mask.any(): return np.nan mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna and mask is not None: np.putmask(adjusted, mask, 0) adjusted2 = adjusted**2 adjusted4 = adjusted2**2 m2 = adjusted2.sum(axis, dtype=np.float64) m4 = adjusted4.sum(axis, dtype=np.float64) with np.errstate(invalid="ignore", divide="ignore"): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) numerator = count * (count + 1) * (count - 1) * m4 denominator = (count - 2) * (count - 3) * m2**2 # floating point error # # #18044 in _libs/windows.pyx calc_kurt follow this behavior # to fix the fperr to treat denom <1e-14 as zero numerator = _zero_out_fperr(numerator) denominator = _zero_out_fperr(denominator) if not isinstance(denominator, np.ndarray): # if ``denom`` is a scalar, check these corner cases first before # doing division if count < 4: return np.nan if denominator == 0: return 0 with np.errstate(invalid="ignore", divide="ignore"): result = numerator / denominator - adj dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype, copy=False) if isinstance(result, np.ndarray): result = np.where(denominator == 0, 0, result) result[count < 4] = np.nan return result
Compute the sample excess kurtosis

The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.

Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
result : float64
    Unless input is a float array, in which case use the same
    precision as the input array.

Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
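The same kind of NaN-dropping sketch works for the G2 excess kurtosis formula (numerator / denominator - adj); simple_kurt below is a hypothetical helper for illustration, not the masked, axis-aware implementation above.

import numpy as np

def simple_kurt(values):
    # NaN-dropping sketch of the G2 excess kurtosis formula.
    x = np.asarray(values, dtype="f8")
    x = x[~np.isnan(x)]
    n = x.size
    if n < 4:
        return np.nan
    mean = x.mean()
    m2 = ((x - mean) ** 2).sum()
    m4 = ((x - mean) ** 4).sum()
    if m2 == 0:
        return 0.0
    adj = 3 * (n - 1) ** 2 / ((n - 2) * (n - 3))
    numerator = n * (n + 1) * (n - 1) * m4
    denominator = (n - 2) * (n - 3) * m2 ** 2
    return numerator / denominator - adj

print(simple_kurt([1, np.nan, 1, 3, 2]))  # about -1.2892562, matching the docstring example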
173,332
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def _maybe_get_mask( values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None ) -> npt.NDArray[np.bool_] | None: """ Compute a mask if and only if necessary. This function will compute a mask iff it is necessary. Otherwise, return the provided mask (potentially None) when a mask does not need to be computed. A mask is never necessary if the values array is of boolean or integer dtypes, as these are incapable of storing NaNs. If passing a NaN-capable dtype that is interpretable as either boolean or integer data (eg, timedelta64), a mask must be provided. If the skipna parameter is False, a new mask will not be computed. The mask is computed using isna() by default. Setting invert=True selects notna() as the masking function. Parameters ---------- values : ndarray input array to potentially compute mask for skipna : bool boolean for whether NaNs should be skipped mask : Optional[ndarray] nan-mask if known Returns ------- Optional[np.ndarray[bool]] """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): # Boolean data cannot contain nulls, so signal via mask being None return None if skipna or needs_i8_conversion(values.dtype): mask = isna(values) return mask def _maybe_null_out( result: np.ndarray | float | NaTType, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, shape: tuple[int, ...], min_count: int = 1, ) -> np.ndarray | float | NaTType: """ Returns ------- Dtype The product of all elements on a given axis. 
( NaNs are treated as 1) """ if mask is None and min_count == 0: # nothing to check; short-circuit return result if axis is not None and isinstance(result, np.ndarray): if mask is not None: null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 else: # we have no nulls, kept mask=None in _maybe_get_mask below_count = shape[axis] - min_count < 0 new_shape = shape[:axis] + shape[axis + 1 :] null_mask = np.broadcast_to(below_count, new_shape) if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): result = result.astype("c16") elif not is_float_dtype(result): result = result.astype("f8", copy=False) result[null_mask] = np.nan else: # GH12941, use None to auto cast null result[null_mask] = None elif result is not NaT: if check_below_min_count(shape, mask, min_count): result_dtype = getattr(result, "dtype", None) if is_float_dtype(result_dtype): # error: Item "None" of "Optional[Any]" has no attribute "type" result = result_dtype.type("nan") # type: ignore[union-attr] else: result = np.nan return result AxisInt = int The provided code snippet includes necessary dependencies for implementing the `nanprod` function. Write a Python function `def nanprod( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, min_count: int = 0, mask: npt.NDArray[np.bool_] | None = None, ) -> float` to solve the following problem: Parameters ---------- values : ndarray[dtype] axis : int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- Dtype The product of all elements on a given axis. ( NaNs are treated as 1) Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, 3, np.nan]) >>> nanops.nanprod(s) 6.0 Here is the function: def nanprod( values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, min_count: int = 0, mask: npt.NDArray[np.bool_] | None = None, ) -> float: """ Parameters ---------- values : ndarray[dtype] axis : int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- Dtype The product of all elements on a given axis. ( NaNs are treated as 1) Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, 3, np.nan]) >>> nanops.nanprod(s) 6.0 """ mask = _maybe_get_mask(values, skipna, mask) if skipna and mask is not None: values = values.copy() values[mask] = 1 result = values.prod(axis) # error: Incompatible return value type (got "Union[ndarray, float]", expected # "float") return _maybe_null_out( # type: ignore[return-value] result, axis, mask, values.shape, min_count=min_count )
Parameters
----------
values : ndarray[dtype]
axis : int, optional
skipna : bool, default True
min_count: int, default 0
mask : ndarray[bool], optional
    nan-mask if known

Returns
-------
Dtype
    The product of all elements on a given axis. ( NaNs are treated as 1)

Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, 2, 3, np.nan])
>>> nanops.nanprod(s)
6.0
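The core trick in nanprod is that missing positions are filled with 1, the multiplicative identity, before reducing, and the result is nulled out when fewer than min_count real values remain. A 1-D-only sketch under those assumptions (simple_nanprod is an illustrative name; no axis or ExtensionArray handling):

import numpy as np

def simple_nanprod(values, min_count=0):
    x = np.asarray(values, dtype="f8")
    mask = np.isnan(x)
    # NaNs are replaced by the multiplicative identity before reducing.
    filled = np.where(mask, 1.0, x)
    # Null out the result when too few real observations are present.
    if (x.size - mask.sum()) < min_count:
        return np.nan
    return filled.prod()

print(simple_nanprod([1, 2, 3, np.nan]))              # 6.0
print(simple_nanprod([np.nan, np.nan], min_count=1))  # nan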
173,333
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def get_corr_func( method: CorrelationMethod, ) -> Callable[[np.ndarray, np.ndarray], float]: if method == "kendall": from scipy.stats import kendalltau def func(a, b): return kendalltau(a, b)[0] return func elif method == "spearman": from scipy.stats import spearmanr def func(a, b): return spearmanr(a, b)[0] return func elif method == "pearson": def func(a, b): return np.corrcoef(a, b)[0, 1] return func elif callable(method): return method raise ValueError( f"Unknown method '{method}', expected one of " "'kendall', 'spearman', 'pearson', or callable" ) CorrelationMethod = Union[ Literal["pearson", "kendall", "spearman"], Callable[[np.ndarray, np.ndarray], float] ] def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. 
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res The provided code snippet includes necessary dependencies for implementing the `nancorr` function. Write a Python function `def nancorr( a: np.ndarray, b: np.ndarray, *, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float` to solve the following problem: a, b: ndarrays Here is the function: def nancorr( a: np.ndarray, b: np.ndarray, *, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ a, b: ndarrays """ if len(a) != len(b): raise AssertionError("Operands to nancorr must have same size") if min_periods is None: min_periods = 1 valid = notna(a) & notna(b) if not valid.all(): a = a[valid] b = b[valid] if len(a) < min_periods: return np.nan f = get_corr_func(method) return f(a, b)
a, b: ndarrays
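nancorr reduces to three steps: keep only pairwise-complete observations, bail out below min_periods, then hand the cleaned arrays to the chosen correlation function. The sketch below hard-codes Pearson via np.corrcoef; simple_nancorr is an illustrative name, not the pandas function.

import numpy as np

def simple_nancorr(a, b, min_periods=1):
    a = np.asarray(a, dtype="f8")
    b = np.asarray(b, dtype="f8")
    # Keep only positions where both inputs are observed.
    valid = ~np.isnan(a) & ~np.isnan(b)
    a, b = a[valid], b[valid]
    if len(a) < min_periods:
        return np.nan
    return np.corrcoef(a, b)[0, 1]

# Only the two fully observed pairs (1, 1) and (4, 8) enter the correlation.
print(simple_nancorr([1.0, 2.0, np.nan, 4.0], [1.0, np.nan, 3.0, 8.0]))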
173,334
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res def nancov( a: np.ndarray, b: np.ndarray, *, min_periods: int | None = None, ddof: int | None = 1, ) -> float: if len(a) != len(b): raise AssertionError("Operands to nancov must have same size") if min_periods is None: min_periods = 1 valid = notna(a) & notna(b) if not valid.all(): a = a[valid] b = b[valid] if len(a) < min_periods: return np.nan return np.cov(a, b, ddof=ddof)[0, 1]
null
173,335
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array def is_bool_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> from pandas.api.types import is_bool_dtype >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndex): # Allow Index[object] that is all-bools or Index["boolean"] return arr_or_dtype.inferred_type == "boolean" elif isinstance(dtype, ExtensionDtype): return getattr(dtype, "_is_boolean", False) return issubclass(dtype.type, np.bool_) def isna(obj: Scalar) -> bool: ... def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. 
>>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) def make_nancomp(op): def f(x, y): xmask = isna(x) ymask = isna(y) mask = xmask | ymask with np.errstate(all="ignore"): result = op(x, y) if mask.any(): if is_bool_dtype(result): result = result.astype("O") np.putmask(result, mask, np.nan) return result return f
null
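The wrapper built by make_nancomp evaluates the comparison as usual and, when either operand is missing, upcasts the boolean result to object so NaN can be written into those positions. A standalone NumPy-only sketch of that idea (simple_nancomp is a made-up name and the float conversion is an assumption made for brevity):

import operator
import numpy as np

def simple_nancomp(op):
    def f(x, y):
        x = np.asarray(x, dtype="f8")
        y = np.asarray(y, dtype="f8")
        mask = np.isnan(x) | np.isnan(y)
        result = op(x, y)
        if mask.any():
            # Boolean arrays cannot hold NaN, so upcast to object first.
            result = result.astype(object)
            result[mask] = np.nan
        return result
    return f

gt = simple_nancomp(operator.gt)
print(gt([1.0, np.nan, 3.0], [0.0, 2.0, 5.0]))  # [True nan False]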
173,336
from __future__ import annotations import functools import itertools import operator from typing import ( Any, Callable, cast, ) import warnings import numpy as np from pandas._config import get_option from pandas._libs import ( NaT, NaTType, iNaT, lib, ) from pandas._typing import ( ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_int_dtype, is_bool_dtype, is_complex, is_datetime64_any_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, ) from pandas.core.construction import extract_array ArrayLike = Union["ExtensionArray", np.ndarray] def isna(obj: Scalar) -> bool: ... def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) The provided code snippet includes necessary dependencies for implementing the `na_accum_func` function. Write a Python function `def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike` to solve the following problem: Cumulative function with skipna support. 
Parameters ---------- values : np.ndarray or ExtensionArray accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate} skipna : bool Returns ------- np.ndarray or ExtensionArray Here is the function: def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike: """ Cumulative function with skipna support. Parameters ---------- values : np.ndarray or ExtensionArray accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate} skipna : bool Returns ------- np.ndarray or ExtensionArray """ mask_a, mask_b = { np.cumprod: (1.0, np.nan), np.maximum.accumulate: (-np.inf, np.nan), np.cumsum: (0.0, np.nan), np.minimum.accumulate: (np.inf, np.nan), }[accum_func] # This should go through ea interface assert values.dtype.kind not in ["m", "M"] # We will be applying this function to block values if skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)): vals = values.copy() mask = isna(vals) vals[mask] = mask_a result = accum_func(vals, axis=0) result[mask] = mask_b else: result = accum_func(values, axis=0) return result
Cumulative function with skipna support.

Parameters
----------
values : np.ndarray or ExtensionArray
accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate}
skipna : bool

Returns
-------
np.ndarray or ExtensionArray
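The mask_a / mask_b table in na_accum_func pairs each accumulation with its identity fill value (what masked slots become before accumulating) and the value written back afterwards. A minimal skipna cumulative sum using the same fill-accumulate-restore pattern, with an illustrative helper name:

import numpy as np

def simple_skipna_cumsum(values):
    x = np.asarray(values, dtype="f8")
    mask = np.isnan(x)
    # Fill missing slots with the additive identity, accumulate,
    # then put NaN back where the input was missing.
    filled = np.where(mask, 0.0, x)
    out = np.cumsum(filled)
    out[mask] = np.nan
    return out

print(simple_skipna_cumsum([1.0, np.nan, 2.0, 3.0]))  # -> [1.0, nan, 3.0, 6.0]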
173,337
from __future__ import annotations import numba import numpy as np from pandas.core._numba.kernels.shared import is_monotonic_increasing def add_sum( val: float, nobs: int, sum_x: float, compensation: float, num_consecutive_same_value: int, prev_value: float, ) -> tuple[int, float, float, int, float]: if not np.isnan(val): nobs += 1 y = val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t if val == prev_value: num_consecutive_same_value += 1 else: num_consecutive_same_value = 1 prev_value = val return nobs, sum_x, compensation, num_consecutive_same_value, prev_value def remove_sum( val: float, nobs: int, sum_x: float, compensation: float ) -> tuple[int, float, float]: if not np.isnan(val): nobs -= 1 y = -val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t return nobs, sum_x, compensation # error: Any? not callable # type: ignore[misc] ) def is_monotonic_increasing(bounds: np.ndarray) -> bool: """Check if int64 values are monotonically increasing.""" n = len(bounds) if n < 2: return True prev = bounds[0] for i in range(1, n): cur = bounds[i] if cur < prev: return False prev = cur return True def sliding_sum( values: np.ndarray, start: np.ndarray, end: np.ndarray, min_periods: int, ) -> np.ndarray: N = len(start) nobs = 0 sum_x = 0.0 compensation_add = 0.0 compensation_remove = 0.0 is_monotonic_increasing_bounds = is_monotonic_increasing( start ) and is_monotonic_increasing(end) output = np.empty(N, dtype=np.float64) for i in range(N): s = start[i] e = end[i] if i == 0 or not is_monotonic_increasing_bounds: prev_value = values[s] num_consecutive_same_value = 0 for j in range(s, e): val = values[j] ( nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value, ) = add_sum( val, nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value, ) else: for j in range(start[i - 1], s): val = values[j] nobs, sum_x, compensation_remove = remove_sum( val, nobs, sum_x, compensation_remove ) for j in range(end[i - 1], e): val = values[j] ( nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value, ) = add_sum( val, nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value, ) if nobs == 0 == min_periods: result = 0.0 elif nobs >= min_periods: if num_consecutive_same_value >= nobs: result = prev_value * nobs else: result = sum_x else: result = np.nan output[i] = result if not is_monotonic_increasing_bounds: nobs = 0 sum_x = 0.0 compensation_remove = 0.0 return output
null
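add_sum and remove_sum above maintain a compensated (Kahan) running sum so window elements can be added and removed without accumulating rounding error. The snippet below isolates just the compensation step; kahan_add is an illustrative name and is not part of the kernel.

def kahan_add(val, total, compensation):
    # One step of compensated summation: `compensation` carries the
    # low-order bits that the plain float addition rounds away.
    y = val - compensation
    t = total + y
    compensation = t - total - y
    total = t
    return total, compensation

# 2**53 is where adding 1.0 to a float no longer changes it.
naive = 2.0 ** 53
total, comp = 2.0 ** 53, 0.0
for _ in range(2):
    naive = naive + 1.0
    total, comp = kahan_add(1.0, total, comp)
print(naive)  # 9007199254740992.0: both 1.0s were rounded away
print(total)  # 9007199254740994.0: the compensated sum keeps them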
173,338
from __future__ import annotations import numba import numpy as np from pandas.core._numba.kernels.shared import is_monotonic_increasing def add_mean( val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float, num_consecutive_same_value: int, prev_value: float, ) -> tuple[int, float, int, float, int, float]: if not np.isnan(val): nobs += 1 y = val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t if val < 0: neg_ct += 1 if val == prev_value: num_consecutive_same_value += 1 else: num_consecutive_same_value = 1 prev_value = val return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value def remove_mean( val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float ) -> tuple[int, float, int, float]: if not np.isnan(val): nobs -= 1 y = -val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t if val < 0: neg_ct -= 1 return nobs, sum_x, neg_ct, compensation # error: Any? not callable # type: ignore[misc] ) def is_monotonic_increasing(bounds: np.ndarray) -> bool: """Check if int64 values are monotonically increasing.""" n = len(bounds) if n < 2: return True prev = bounds[0] for i in range(1, n): cur = bounds[i] if cur < prev: return False prev = cur return True def sliding_mean( values: np.ndarray, start: np.ndarray, end: np.ndarray, min_periods: int, ) -> np.ndarray: N = len(start) nobs = 0 sum_x = 0.0 neg_ct = 0 compensation_add = 0.0 compensation_remove = 0.0 is_monotonic_increasing_bounds = is_monotonic_increasing( start ) and is_monotonic_increasing(end) output = np.empty(N, dtype=np.float64) for i in range(N): s = start[i] e = end[i] if i == 0 or not is_monotonic_increasing_bounds: prev_value = values[s] num_consecutive_same_value = 0 for j in range(s, e): val = values[j] ( nobs, sum_x, neg_ct, compensation_add, num_consecutive_same_value, prev_value, ) = add_mean( val, nobs, sum_x, neg_ct, compensation_add, num_consecutive_same_value, prev_value, ) else: for j in range(start[i - 1], s): val = values[j] nobs, sum_x, neg_ct, compensation_remove = remove_mean( val, nobs, sum_x, neg_ct, compensation_remove ) for j in range(end[i - 1], e): val = values[j] ( nobs, sum_x, neg_ct, compensation_add, num_consecutive_same_value, prev_value, ) = add_mean( val, nobs, sum_x, neg_ct, compensation_add, num_consecutive_same_value, prev_value, ) if nobs >= min_periods and nobs > 0: result = sum_x / nobs if num_consecutive_same_value >= nobs: result = prev_value elif neg_ct == 0 and result < 0: result = 0 elif neg_ct == nobs and result > 0: result = 0 else: result = np.nan output[i] = result if not is_monotonic_increasing_bounds: nobs = 0 sum_x = 0.0 neg_ct = 0 compensation_remove = 0.0 return output
null
173,339
from __future__ import annotations import numba import numpy as np from pandas.core._numba.kernels.shared import is_monotonic_increasing def add_var( val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float, num_consecutive_same_value: int, prev_value: float, ) -> tuple[int, float, float, float, int, float]: if not np.isnan(val): if val == prev_value: num_consecutive_same_value += 1 else: num_consecutive_same_value = 1 prev_value = val nobs += 1 prev_mean = mean_x - compensation y = val - compensation t = y - mean_x compensation = t + mean_x - y delta = t if nobs: mean_x += delta / nobs else: mean_x = 0 ssqdm_x += (val - prev_mean) * (val - mean_x) return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value def remove_var( val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float ) -> tuple[int, float, float, float]: if not np.isnan(val): nobs -= 1 if nobs: prev_mean = mean_x - compensation y = val - compensation t = y - mean_x compensation = t + mean_x - y delta = t mean_x -= delta / nobs ssqdm_x -= (val - prev_mean) * (val - mean_x) else: mean_x = 0 ssqdm_x = 0 return nobs, mean_x, ssqdm_x, compensation # error: Any? not callable # type: ignore[misc] ) def is_monotonic_increasing(bounds: np.ndarray) -> bool: """Check if int64 values are monotonically increasing.""" n = len(bounds) if n < 2: return True prev = bounds[0] for i in range(1, n): cur = bounds[i] if cur < prev: return False prev = cur return True def sliding_var( values: np.ndarray, start: np.ndarray, end: np.ndarray, min_periods: int, ddof: int = 1, ) -> np.ndarray: N = len(start) nobs = 0 mean_x = 0.0 ssqdm_x = 0.0 compensation_add = 0.0 compensation_remove = 0.0 min_periods = max(min_periods, 1) is_monotonic_increasing_bounds = is_monotonic_increasing( start ) and is_monotonic_increasing(end) output = np.empty(N, dtype=np.float64) for i in range(N): s = start[i] e = end[i] if i == 0 or not is_monotonic_increasing_bounds: prev_value = values[s] num_consecutive_same_value = 0 for j in range(s, e): val = values[j] ( nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value, ) = add_var( val, nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value, ) else: for j in range(start[i - 1], s): val = values[j] nobs, mean_x, ssqdm_x, compensation_remove = remove_var( val, nobs, mean_x, ssqdm_x, compensation_remove ) for j in range(end[i - 1], e): val = values[j] ( nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value, ) = add_var( val, nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value, ) if nobs >= min_periods and nobs > ddof: if nobs == 1 or num_consecutive_same_value >= nobs: result = 0.0 else: result = ssqdm_x / (nobs - ddof) else: result = np.nan output[i] = result if not is_monotonic_increasing_bounds: nobs = 0 mean_x = 0.0 ssqdm_x = 0.0 compensation_remove = 0.0 return output
null
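add_var and remove_var update a running mean and running sum of squared deviations in place, which is the Welford recurrence with an extra Kahan-style compensation term. Stripped of the compensation, the per-element update looks like the sketch below (welford_add is an illustrative name):

def welford_add(val, nobs, mean, m2):
    # Welford update: fold one observation into a running mean and
    # running sum of squared deviations (m2).
    nobs += 1
    delta = val - mean
    mean += delta / nobs
    m2 += delta * (val - mean)  # uses the *updated* mean
    return nobs, mean, m2

nobs, mean, m2 = 0, 0.0, 0.0
for v in [1.0, 2.0, 4.0, 7.0]:
    nobs, mean, m2 = welford_add(v, nobs, mean, m2)
print(mean)             # close to 3.5
print(m2 / (nobs - 1))  # close to 7.0, the ddof=1 sample variance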
173,340
from __future__ import annotations import numba import numpy as np def sliding_min_max( values: np.ndarray, start: np.ndarray, end: np.ndarray, min_periods: int, is_max: bool, ) -> np.ndarray: N = len(start) nobs = 0 output = np.empty(N, dtype=np.float64) # Use deque once numba supports it # https://github.com/numba/numba/issues/7417 Q: list = [] W: list = [] for i in range(N): curr_win_size = end[i] - start[i] if i == 0: st = start[i] else: st = end[i - 1] for k in range(st, end[i]): ai = values[k] if not np.isnan(ai): nobs += 1 elif is_max: ai = -np.inf else: ai = np.inf # Discard previous entries if we find new min or max if is_max: while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]): Q.pop() else: while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]): Q.pop() Q.append(k) W.append(k) # Discard entries outside and left of current window while Q and Q[0] <= start[i] - 1: Q.pop(0) while W and W[0] <= start[i] - 1: if not np.isnan(values[W[0]]): nobs -= 1 W.pop(0) # Save output based on index in input value array if Q and curr_win_size > 0 and nobs >= min_periods: output[i] = values[Q[0]] else: output[i] = np.nan return output
null
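sliding_min_max keeps a queue Q of candidate indices whose values are monotonic, so the current window extremum is always at the front. Below is a plain-Python sketch of the same monotonic-queue idea for a fixed-size rolling maximum; it ignores the NaN counting and min_periods handling of the real kernel, and rolling_max is a made-up name.

from collections import deque

import numpy as np

def rolling_max(values, window):
    values = np.asarray(values, dtype="f8")
    q = deque()  # indices with decreasing values; the max sits at q[0]
    out = np.full(len(values), np.nan)
    for i, val in enumerate(values):
        # Drop indices whose values can never be the maximum again.
        while q and values[q[-1]] <= val:
            q.pop()
        q.append(i)
        # Drop the front index once it falls out of the window.
        if q[0] <= i - window:
            q.popleft()
        if i >= window - 1:
            out[i] = values[q[0]]
    return out

print(rolling_max([1.0, 3.0, 2.0, 5.0, 4.0], window=3))  # -> [nan, nan, 3.0, 5.0, 5.0]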
173,341
from __future__ import annotations import functools from typing import ( TYPE_CHECKING, Callable, ) import numpy as np from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency TYPE_CHECKING = True class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime] def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module The provided code snippet includes necessary dependencies for implementing the `generate_shared_aggregator` function. 
Write a Python function `def generate_shared_aggregator( func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool, )` to solve the following problem: Generate a Numba function that loops over the columns 2D object and applies a 1D numba kernel over each column. Parameters ---------- func : function aggregation function to be applied to each column nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function Here is the function: def generate_shared_aggregator( func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool, ): """ Generate a Numba function that loops over the columns 2D object and applies a 1D numba kernel over each column. Parameters ---------- func : function aggregation function to be applied to each column nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function """ if TYPE_CHECKING: import numba else: numba = import_optional_dependency("numba") @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def column_looper( values: np.ndarray, start: np.ndarray, end: np.ndarray, min_periods: int, *args, ): result = np.empty((len(start), values.shape[1]), dtype=np.float64) for i in numba.prange(values.shape[1]): result[:, i] = func(values[:, i], start, end, min_periods, *args) return result return column_looper
Generate a Numba function that loops over the columns 2D object and
applies a 1D numba kernel over each column.

Parameters
----------
func : function
    aggregation function to be applied to each column
nopython : bool
    nopython to be passed into numba.jit
nogil : bool
    nogil to be passed into numba.jit
parallel : bool
    parallel to be passed into numba.jit

Returns
-------
Numba function
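Since numba is an optional dependency, the sketch below is a plain-NumPy stand-in for the generated column looper: it shows the calling convention (a 1-D kernel taking values, start, end, min_periods and returning one value per window, applied column by column) without the @numba.jit / prange machinery. All names here are illustrative.

import numpy as np

def column_looper(kernel, values, start, end, min_periods, *args):
    # Apply a 1-D windowed kernel to every column of a 2-D block.
    result = np.empty((len(start), values.shape[1]), dtype=np.float64)
    for j in range(values.shape[1]):
        result[:, j] = kernel(values[:, j], start, end, min_periods, *args)
    return result

def window_sum(col, start, end, min_periods):
    # Toy kernel with the expected signature: one output per window.
    out = np.empty(len(start), dtype=np.float64)
    for i, (s, e) in enumerate(zip(start, end)):
        window = col[s:e]
        out[i] = window.sum() if len(window) >= min_periods else np.nan
    return out

values = np.arange(8, dtype=np.float64).reshape(4, 2)
start = np.array([0, 0, 1, 2])
end = np.array([1, 2, 3, 4])
print(column_looper(window_sum, values, start, end, min_periods=1))

The real looper differs in that the column loop runs under numba.prange and the whole function is jitted with the nopython, nogil, and parallel flags passed in.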
173,342
from __future__ import annotations import operator from typing import Any import numpy as np from pandas._libs import lib from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op from pandas.core.dtypes.generic import ABCNDFrame from pandas.core import roperator from pandas.core.construction import extract_array from pandas.core.ops.common import unpack_zerodim_and_defer def _standardize_out_kwarg(**kwargs) -> dict: """ If kwargs contain "out1" and "out2", replace that with a tuple "out" np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or `out1=out1, out2=out2)` """ if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs: out1 = kwargs.pop("out1") out2 = kwargs.pop("out2") out = (out1, out2) kwargs["out"] = out return kwargs def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): """ If we have an `out` keyword, then call the ufunc without `out` and then set the result into the given `out`. """ # Note: we assume _standardize_out_kwarg has already been called. out = kwargs.pop("out") where = kwargs.pop("where", None) result = getattr(ufunc, method)(*inputs, **kwargs) if result is NotImplemented: return NotImplemented if isinstance(result, tuple): # i.e. np.divmod, np.modf, np.frexp if not isinstance(out, tuple) or len(out) != len(result): raise NotImplementedError for arr, res in zip(out, result): _assign_where(arr, res, where) return out if isinstance(out, tuple): if len(out) == 1: out = out[0] else: raise NotImplementedError _assign_where(out, result, where) return out def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): """ Fallback to the behavior we would get if we did not define __array_ufunc__. Notes ----- We are assuming that `self` is among `inputs`. """ if not any(x is self for x in inputs): raise NotImplementedError new_inputs = [x if x is not self else np.asarray(x) for x in inputs] return getattr(ufunc, method)(*new_inputs, **kwargs) def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): """ Dispatch ufunc reductions to self's reduction methods. """ assert method == "reduce" if len(inputs) != 1 or inputs[0] is not self: return NotImplemented if ufunc.__name__ not in REDUCTION_ALIASES: return NotImplemented method_name = REDUCTION_ALIASES[ufunc.__name__] # NB: we are assuming that min/max represent minimum/maximum methods, # which would not be accurate for e.g. Timestamp.min if not hasattr(self, method_name): return NotImplemented if self.ndim > 1: if isinstance(self, ABCNDFrame): # TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA kwargs["numeric_only"] = False if "axis" not in kwargs: # For DataFrame reductions we don't want the default axis=0 # Note: np.min is not a ufunc, but uses array_function_dispatch, # so calls DataFrame.min (without ever getting here) with the np.min # default of axis=None, which DataFrame.min catches and changes to axis=0. # np.minimum.reduce(df) gets here bc axis is not in kwargs, # so we set axis=0 to match the behaviorof np.minimum.reduce(df.values) kwargs["axis"] = 0 # By default, numpy's reductions do not skip NaNs, so we have to # pass skipna=False return getattr(self, method_name)(skipna=False, **kwargs) Any = object() def extract_array( obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... ) -> ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = ..., extract_range: bool = ... ) -> T | ArrayLike: ... 
def extract_array( obj: T, extract_numpy: bool = False, extract_range: bool = False ) -> T | ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray. extract_range : bool, default False If we have a RangeIndex, return range._values if True (which is a materialized integer ndarray), otherwise return unchanged. Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index the ndarray is returned. >>> extract_array(pd.Series([1, 2, 3])) array([1, 2, 3]) To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndex, ABCSeries)): if isinstance(obj, ABCRangeIndex): if extract_range: return obj._values # https://github.com/python/mypy/issues/1081 # error: Incompatible return value type (got "RangeIndex", expected # "Union[T, Union[ExtensionArray, ndarray[Any, Any]]]") return obj # type: ignore[return-value] return obj._values elif extract_numpy and isinstance(obj, ABCPandasArray): return obj.to_numpy() return obj class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. 
Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False 
if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, 
        nan_as_null: bool = False,
        allow_copy: bool = True
    ) -> DataFrameXchg:
        """
        Return the dataframe interchange object implementing the interchange protocol.

        Parameters
        ----------
        nan_as_null : bool, default False
            Whether to tell the DataFrame to overwrite null values in the data
            with ``NaN`` (or ``NaT``).
        allow_copy : bool, default True
            Whether to allow memory copying when exporting. If set to False
            it would cause non-zero-copy exports to fail.

        Returns
        -------
        DataFrame interchange object
            The object which the consuming library can use to ingress the dataframe.

        Notes
        -----
        Details on the interchange protocol:
        https://data-apis.org/dataframe-protocol/latest/index.html

        `nan_as_null` currently has no effect; once support for nullable extension
        dtypes is added, this value should be propagated to columns.
        """

        from pandas.core.interchange.dataframe import PandasDataFrameXchg

        return PandasDataFrameXchg(self, nan_as_null, allow_copy)

    # ----------------------------------------------------------------------

    @property
    def axes(self) -> list[Index]:
        """
        Return a list representing the axes of the DataFrame.

        It has the row axis labels and column axis labels as the only members.
        They are returned in that order.

        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.axes
        [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
        dtype='object')]
        """
        return [self.index, self.columns]

    @property
    def shape(self) -> tuple[int, int]:
        """
        Return a tuple representing the dimensionality of the DataFrame.

        See Also
        --------
        ndarray.shape : Tuple of array dimensions.

        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.shape
        (2, 2)

        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
        ...                    'col3': [5, 6]})
        >>> df.shape
        (2, 3)
        """
        return len(self.index), len(self.columns)

    @property
    def _is_homogeneous_type(self) -> bool:
        """
        Whether all the columns in a DataFrame have the same type.

        Returns
        -------
        bool

        See Also
        --------
        Index._is_homogeneous_type : Whether the object has a single
            dtype.
        MultiIndex._is_homogeneous_type : Whether all the levels of a
            MultiIndex have the same dtype.

        Examples
        --------
        >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
        True
        >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
        False

        Items with the same type but different sizes are considered
        different types.

        >>> DataFrame({
        ...     "A": np.array([1, 2], dtype=np.int32),
        ...     "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
        False
        """
        if isinstance(self._mgr, ArrayManager):
            return len({arr.dtype for arr in self._mgr.arrays}) == 1
        if self._mgr.any_extension_types:
            return len({block.dtype for block in self._mgr.blocks}) == 1
        else:
            return not self._is_mixed_type

    @property
    def _can_fast_transpose(self) -> bool:
        """
        Can we transpose this DataFrame without creating any new array objects.
        """
        if isinstance(self._mgr, ArrayManager):
            return False
        blocks = self._mgr.blocks
        if len(blocks) != 1:
            return False

        dtype = blocks[0].dtype
        # TODO(EA2D) special case would be unnecessary with 2D EAs
        return not is_1d_only_ea_dtype(dtype)

    @property
    def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray:
        """
        Analogue to ._values that may return a 2D ExtensionArray.
""" mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. 
val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. 
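# A short sketch of the ``float_format`` parameter documented above, assuming
# pandas is importable; the column name ``price`` is only illustrative.
import pandas as pd

df = pd.DataFrame({"price": [1234.5678, 9.1]})
# Any one-argument callable returning a string works as ``float_format``.
print(df.to_string(float_format="{:,.2f}".format))
# Floats render as e.g. 1,234.57 and 9.10 while the integer index is untouched.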
%(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. 
Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. 
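# Following up on the ``itertuples`` notes above: a minimal sketch of how column
# names that are not valid Python identifiers are replaced by positional names
# (the column name "not valid" below is purely illustrative).
import pandas as pd

df = pd.DataFrame({"valid": [1, 2], "not valid": [3, 4]})
for row in df.itertuples(index=False):
    print(row)
# The second field is renamed to a positional name, e.g.:
# Pandas(valid=1, _1=3)
# Pandas(valid=2, _1=4)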
Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. >>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. 
If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. 
Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. 
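# A minimal sketch of the ``index`` flag described above, assuming pandas >= 2.0;
# with ``index=False`` the 'index' entry is dropped from the 'split' layout.
import pandas as pd

df = pd.DataFrame({"col1": [1, 2], "col2": [0.5, 0.75]}, index=["row1", "row2"])
print(df.to_dict(orient="split", index=False))
# roughly: {'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]}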
.. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. 
_console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... 
dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... {'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if 
any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. 
# # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. 
Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file. 
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
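As a hedged aside on the timezone caveat above (a sketch only, not part of the official examples; the column selection is an assumption about your data), timezone-aware columns can be normalized to naive UTC before writing:

    >>> tz_cols = df.select_dtypes(include=['datetimetz']).columns  # doctest: +SKIP
    >>> df[tz_cols] = df[tz_cols].apply(
    ...     lambda s: s.dt.tz_convert('UTC').dt.tz_localize(None)
    ... )  # doctest: +SKIP
    >>> df.to_orc('df_utc.orc')  # doctest: +SKIP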
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
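A minimal usage sketch of the positional semantics described above (the frame and values are illustrative):

    >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    >>> df.isetitem(1, [30, 40])
    >>> df
       a   b
    0  1  30
    1  2  40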
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
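A short sketch of the alignment behaviour described above, shown through the public ``df[col] = series`` path (the values are hypothetical):

    >>> df = pd.DataFrame({"a": [1, 2, 3]})
    >>> df["b"] = pd.Series([10, 20], index=[2, 0])  # conformed to df.index
    >>> df["b"].tolist()
    [20.0, nan, 10.0]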
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
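As a hedged aside on the ``allow_duplicates`` handling in the implementation below: when a frame's flags disallow duplicate labels, requesting a duplicate insert raises (the error text mirrors the check in this method):

    >>> df = pd.DataFrame({"col1": [1, 2]}).set_flags(allows_duplicate_labels=False)
    >>> df.insert(0, "col1", [3, 4], allow_duplicates=True)  # doctest: +SKIP
    Traceback (most recent call last):
    ValueError: Cannot specify 'allow_duplicates=True' when 'self.flags.allows_duplicate_labels' is False.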
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 """ ) **_shared_doc_kwargs, extended_summary_sub=" column or", axis_description_sub=", and 1 identifies the columns", see_also_sub=" or columns", ) ) # ---------------------------------------------------------------------- # Reindex-based selection methods # ---------------------------------------------------------------------- # Sorting # error: Signature of "sort_values" incompatible with supertype "NDFrame" # TODO: Just move the sort_values doc here. ) # ---------------------------------------------------------------------- # Arithmetic Methods ) ) ) # ---------------------------------------------------------------------- # Function application ) # error: Signature of "any" incompatible with supertype "NDFrame" [override] # error: Missing return statement ) # ---------------------------------------------------------------------- # Merging / joining methods # ---------------------------------------------------------------------- # Statistical methods, etc. # ---------------------------------------------------------------------- # ndarray-like stats methods # ---------------------------------------------------------------------- # Add index and columns # ---------------------------------------------------------------------- # Add plotting methods to DataFrame # ---------------------------------------------------------------------- # Internal Interface Methods DataFrame class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ) -> None: # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. 
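A minimal usage sketch (this is an internal helper, so the call below is illustrative only and skipped in doctests; it assumes the optional ArrayManager backend is available):

>>> df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
>>> type(df._as_manager("array")._mgr).__name__  # doctest: +SKIP
'ArrayManager'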
Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. """ if self._attrs is None: self._attrs = {} return self._attrs def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. 
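A short method-chaining sketch (the frame below is illustrative only):

>>> (
...     pd.DataFrame({"a": [1, 2]})
...     .set_flags(allows_duplicate_labels=False)
...     .flags.allows_duplicate_labels
... )
False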
Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. 
DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. """ from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. 
copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit in case of having a single block, otherwise a # copy is made, we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... 
], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. 
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... 
def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. 
Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. 
numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. 
Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. 
axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." 
) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. """ return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. 
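For a DataFrame this is equivalent to checking ``len(df.index) == 0 or len(df.columns) == 0`` (a restatement of the per-axis length check; the frame below is illustrative):

>>> pd.DataFrame(index=[1, 2, 3]).empty  # rows but no columns
True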
Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional recursion # e.g. 
say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache: dict[Hashable, Series] = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods klass="object", storage_options=_shared_docs["storage_options"], storage_options_versionadded="1.2.0", ) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True, index: bool_t = True, index_label: IndexLabel = None, startrow: int = 0, startcol: int = 0, engine: str | None = None, merge_cells: bool_t = True, inf_rep: str = "inf", freeze_panes: tuple[int, int] | None = None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. 
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string. 
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. 
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
None is returned if the callable passed into ``method`` does not return an integer number of rows. The number of returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> from sqlalchemy import text >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation. >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM integers")).fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. 
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
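# Hedged illustration (not part of the upstream source): per the Notes in the
# docstring below, ``to_latex`` is implemented on top of the Styler machinery
# since 2.0.0. A rough doctest-style sketch of what ``df.to_latex(index=False)``
# corresponds to under that assumption (exact internal chaining is shown later
# in ``_to_latex_via_styler``):
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"name": ["Raphael", "Donatello"], "age": [26, 45]})
#   >>> styled = df.style.hide(axis="index").format(na_rep="NaN")
#   >>> print(styled.to_latex(hrules=True))  # doctest: +SKIP
#
# This is a sketch of the equivalence only; the public entry point remains
# ``DataFrame.to_latex`` defined immediately below.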
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
relabel_index=relabel_index_, format={"formatter": formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_, ) def _to_latex_via_styler( self, buf=None, *, hide: dict | list[dict] | None = None, relabel_index: dict | list[dict] | None = None, format: dict | list[dict] | None = None, format_index: dict | list[dict] | None = None, render_kwargs: dict | None = None, ): """ Render object to a LaTeX tabular, longtable, or nested table. Uses the ``Styler`` implementation with the following, ordered, method chaining: .. code-block:: python styler = Styler(DataFrame) styler.hide(**hide) styler.relabel_index(**relabel_index) styler.format(**format) styler.format_index(**format_index) styler.to_latex(buf=buf, **render_kwargs) Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. hide : dict, list of dict Keyword args to pass to the method call of ``Styler.hide``. If a list will call the method numerous times. relabel_index : dict, list of dict Keyword args to pass to the method of ``Styler.relabel_index``. If a list will call the method numerous times. format : dict, list of dict Keyword args to pass to the method call of ``Styler.format``. If a list will call the method numerous times. format_index : dict, list of dict Keyword args to pass to the method call of ``Styler.format_index``. If a list will call the method numerous times. render_kwargs : dict Keyword args to pass to the method call of ``Styler.to_latex``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. """ from pandas.io.formats.style import Styler self = cast("DataFrame", self) styler = Styler(self, uuid="") for kw_name in ["hide", "relabel_index", "format", "format_index"]: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) # bold_rows is not a direct kwarg of Styler.to_latex render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop("bold_rows"): styler.applymap_index(lambda v: "textbf:--rwrap;") return styler.to_latex(buf=buf, **render_kwargs) def to_csv( self, path_or_buf: None = ..., sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> str: ... def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> None: ... 
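# Hedged usage sketch (illustrative, not upstream source): the ``to_csv``
# implementation that follows accepts either a format string or a callable for
# ``float_format`` and a dict for ``compression``. Assuming a writable working
# directory for the second call:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"x": [0.123456, 1.5], "y": ["a", "b"]})
#   >>> df.to_csv(index=False, float_format="%.2f")
#   'x,y\n0.12,a\n1.50,b\n'
#   >>> df.to_csv("out.csv.gz",
#   ...           compression={"method": "gzip", "mtime": 0})  # doctest: +SKIP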
storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | Callable | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, Callable, default None Format string for floating point numbers. If a Callable is given, it takes precedence over other numeric formatting parameters, like decimal. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str, default 'w' Python write mode. The available write modes are the same as :py:func:`open`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. 
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError("list keys are not supported in xs, pass a tuple instead") if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_mgr, name=self.index[loc] ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. 
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., ) -> None: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., ) -> NDFrameT: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., ) -> NDFrameT | None: ... def drop( self: NDFrameT, labels: IndexLabel = None, *, axis: Axis = 0, index: IndexLabel = None, columns: IndexLabel = None, level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {"index": index} if self.ndim == 2: axes["columns"] = columns else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: IgnoreRaise = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
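    # The stub signatures above and below spell out the usual ``inplace``
    # contract for type checkers: ``inplace=True`` returns ``None``, otherwise
    # a new, sorted object is returned.  A minimal usage sketch (illustrative
    # only):
    #
    #   >>> df = pd.DataFrame({"a": [1, 2]}, index=[2, 1])
    #   >>> df.sort_index()
    #      a
    #   1  2
    #   2  1
    #   >>> df.sort_index(inplace=True)  # sorts in place and returns None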
def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT | None: ... def sort_index( self: NDFrameT, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool_t = True, ignore_index: bool_t = False, key: IndexKeyFunc = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer( target, level, ascending, kind, na_position, sort_remaining, key ) if indexer is None: if inplace: result = self else: result = self.copy(deep=None) if ignore_index: result.index = default_index(len(self)) if inplace: return None else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 new_data.set_axis(axis, default_index(len(indexer))) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") klass=_shared_doc_kwargs["klass"], optional_reindex="", ) def reindex( self: NDFrameT, labels=None, index=None, columns=None, axis: Axis | None = None, method: str | None = None, copy: bool_t | None = None, level: Level | None = None, fill_value: Scalar | None = np.nan, limit: int | None = None, tolerance=None, ) -> NDFrameT: """ Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- {optional_reindex} method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: Propagate last valid observation forward to next valid. * backfill / bfill: Use next valid observation to fill gap. * nearest: Use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- {klass} with changed index. 
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
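            # Delegate the realignment to the block manager: ``reindex_indexer``
            # takes the new labels and the (optional) integer ``indexer``
            # computed above and rebuilds the blocks along ``baxis``, writing
            # ``fill_value`` wherever the indexer is -1 (labels with no match
            # in the old axis).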
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
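        # Fallback order below: internal names and ``_metadata`` entries are
        # assigned directly on the instance.  Otherwise, names matching an
        # existing axis label are routed through ``self[name] = value`` (i.e.
        # column assignment), and anything else becomes a plain instance
        # attribute; on DataFrame, assigning a list-like to a brand-new
        # attribute additionally emits a warning, since that usually signals
        # an attempted column creation via attribute access.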
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f() -> None: self._mgr = self._mgr.consolidate() self._protect_consolidate(f) def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value) or value is lib.no_default: return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True def _get_numeric_data(self: NDFrameT) -> NDFrameT: return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def values(self): raise AbstractMethodError(self) def _values(self) -> ArrayLike: """internal implementation""" raise AbstractMethodError(self) def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Parameters ---------- copy : bool, default True Whether to make a copy for non-object or non-inferrable columns or Series. Returns ------- same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ new_mgr = self._mgr.convert(copy=copy) return self._constructor(new_mgr).__finalize__(self, method="infer_objects") def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, dtype_backend: DtypeBackend = "numpy_nullable", ) -> NDFrameT: """ Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. .. versionadded:: 1.2.0 dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable" Which dtype_backend to use, e.g. whether a DataFrame should use nullable dtypes for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- Series or DataFrame Copy of input object with new dtype. See Also -------- infer_objects : Infer dtypes of objects. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. 
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy(deep=None) from pandas import Series value = Series(value) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) if using_copy_on_write(): result = self.copy(deep=None) else: result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( downcast if not is_dict else downcast.get(k) # type: ignore[union-attr] ) res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) if not inplace: result[k] = res_k else: # We can write into our existing column(s) iff dtype # was preserved. if isinstance(res_k, ABCSeries): # i.e. 'k' only shows up once in self.columns if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: # Different dtype -> no way to do inplace. 
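                            # The filled column came back with a different
                            # dtype, so it cannot be written into the existing
                            # values; replace the column wholesale instead.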
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        return self.fillna(
            method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
        )

    @doc(klass=_shared_doc_kwargs["klass"])
    def backfill(
        self: NDFrameT,
        *,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast: dict | None = None,
    ) -> NDFrameT | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.

        .. deprecated:: 2.0
            {klass}.backfill is deprecated. Use {klass}.bfill instead.

        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        warnings.warn(
            "DataFrame.backfill/Series.backfill is deprecated. Use "
            "DataFrame.bfill/Series.bfill instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[False] = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT:
        ...

    @overload
    def replace(
        self,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[True],
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: bool_t = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT | None:
        ...

    @doc(
        _shared_docs["replace"],
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(
        self: NDFrameT,
        to_replace=None,
        value=lib.no_default,
        *,
        inplace: bool_t = False,
        limit: int | None = None,
        regex: bool_t = False,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
    ) -> NDFrameT | None:
        if not (
            is_scalar(to_replace)
            or is_re_compilable(to_replace)
            or is_list_like(to_replace)
        ):
            raise TypeError(
                "Expecting 'to_replace' to be either a scalar, array-like, "
                "dict or None, got invalid type "
                f"{repr(type(to_replace).__name__)}"
            )

        inplace = validate_bool_kwarg(inplace, "inplace")
        if not is_bool(regex) and to_replace is not None:
            raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")

        if value is lib.no_default or method is not lib.no_default:
            # GH#36984 if the user explicitly passes value=None we want to
            # respect that. We have the corner case where the user explicitly
            # passes value=None *and* a method, which we interpret as meaning
            # they want the (documented) default behavior.
            if method is lib.no_default:
                # TODO: get this to show up as the default in the docs?
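                # A hedged usage sketch of this value-less branch (illustrative
                # values only, not part of the test suite):
                # ``pd.Series([1, 0, 0, 3]).replace(0, method="ffill")`` masks the
                # zeros and forward-fills them from the previous unmasked value,
                # giving ``[1, 1, 1, 3]``.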
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) fillna_methods = ["ffill", "bfill", "pad", "backfill"] should_transpose = axis == 1 and method not in fillna_methods obj = self.T if should_transpose else self if obj.empty: return self.copy() if method not in fillna_methods: axis = self._info_axis_number if isinstance(obj.index, MultiIndex) and method != "linear": raise ValueError( "Only `method=linear` interpolation is supported on MultiIndexes." ) # Set `limit_direction` depending on `method` if limit_direction is None: limit_direction = ( "backward" if method in ("backfill", "bfill") else "forward" ) else: if method in ("pad", "ffill") and limit_direction != "forward": raise ValueError( f"`limit_direction` must be 'forward' for method `{method}`" ) if method in ("backfill", "bfill") and limit_direction != "backward": raise ValueError( f"`limit_direction` must be 'backward' for method `{method}`" ) if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype." ) # create/use the index if method == "linear": # prior default index = Index(np.arange(len(obj.index))) else: index = obj.index methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index.dtype) or is_datetime64_any_dtype(index.dtype) or is_timedelta64_dtype(index.dtype) ) if method not in methods and not is_numeric_or_datetime: raise ValueError( "Index column must be numeric or datetime type when " f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " "interpolating." ) if isna(index).any(): raise NotImplementedError( "Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating." ) new_data = obj._mgr.interpolate( method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs, ) result = self._constructor(new_data) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. 
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
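        Notes
        -----
        When both bounds are scalars they are reordered so that
        ``lower <= upper`` before clipping (see the scalar fast path in the
        implementation). A minimal doctest-style sketch of that behaviour,
        with illustrative values:

        >>> pd.Series([1, 5, 10]).clip(8, 2)  # bounds are treated as (2, 8)
        0    2
        1    5
        2    8
        dtype: int64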
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
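        Notes
        -----
        For tick-like offsets (e.g. ``'3D'``), the computed end date is
        excluded from the result when it happens to be present in the index.
        A small sketch of that boundary case, with illustrative values:

        >>> i = pd.date_range('2018-04-09', periods=4, freq='D')
        >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
        >>> ts.first('3D')
                    A
        2018-04-09  1
        2018-04-10  2
        2018-04-11  3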
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self: NDFrameT, cond, other=np.nan, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). If not specified, entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. For `Series` this parameter is unused and defaults to 0. level : int, default None Alignment level if needed. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. If the axis of ``other`` does not align with axis of ``cond`` {klass}, the misaligned index positions will be filled with {cond_rev}. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. The dtype of the object takes precedence. The fill value is casted to the object's dtype, if this can be done losslessly. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s = pd.Series(range(5)) >>> t = pd.Series([True, False]) >>> s.where(t, 99) 0 0 1 99 2 99 3 99 4 99 dtype: int64 >>> s.mask(t, 99) 0 99 1 1 2 99 3 99 4 99 dtype: int64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = common.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level) def mask( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def mask( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... 
def mask( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self: NDFrameT, cond, other=lib.no_default, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") cond = common.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, ) def shift( self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None, ) -> NDFrameT: """ Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ... 
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_convert(None) 2018-09-14 23:30:00 1 dtype: int64 """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") def tz_localize( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo or None Time zone to localize. Passing ``None`` will remove the time zone information and preserve local time. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- {klass} Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']), ... ) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Pass None to convert to tz-naive index and preserve local time: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_localize(None) 2018-09-15 01:30:00 1 dtype: int64 Be careful with DST changes. 
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. 
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> DataFrame | Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, **kwargs ) def _accum_func( self, name: str, func, axis: 
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") The provided code snippet includes necessary dependencies for implementing the `array_ufunc` function. Write a Python function `def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any)` to solve the following problem: Compatibility with numpy ufuncs. See also -------- numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__ Here is the function: def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any): """ Compatibility with numpy ufuncs. See also -------- numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__ """ from pandas.core.frame import ( DataFrame, Series, ) from pandas.core.generic import NDFrame from pandas.core.internals import BlockManager cls = type(self) kwargs = _standardize_out_kwarg(**kwargs) # for binary ops, use our custom dunder methods result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result # Determine if we should defer. no_defer = ( np.ndarray.__array_ufunc__, cls.__array_ufunc__, ) for item in inputs: higher_priority = ( hasattr(item, "__array_priority__") and item.__array_priority__ > self.__array_priority__ ) has_array_ufunc = ( hasattr(item, "__array_ufunc__") and type(item).__array_ufunc__ not in no_defer and not isinstance(item, self._HANDLED_TYPES) ) if higher_priority or has_array_ufunc: return NotImplemented # align all the inputs. types = tuple(type(x) for x in inputs) alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)] if len(alignable) > 1: # This triggers alignment. # At the moment, there aren't any ufuncs with more than two inputs # so this ends up just being x1.index | x2.index, but we write # it to handle *args. set_types = set(types) if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types): # We currently don't handle ufunc(DataFrame, Series) # well. Previously this raised an internal ValueError. We might # support it someday, so raise a NotImplementedError. raise NotImplementedError( f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs." ) axes = self.axes for obj in alignable[1:]: # this relies on the fact that we aren't handling mixed # series / frame ufuncs. for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)): axes[i] = ax1.union(ax2) reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes)) inputs = tuple( x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x for x, t in zip(inputs, types) ) else: reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes)) if self.ndim == 1: names = [getattr(x, "name") for x in inputs if hasattr(x, "name")] name = names[0] if len(set(names)) == 1 else None reconstruct_kwargs = {"name": name} else: reconstruct_kwargs = {} def reconstruct(result): if ufunc.nout > 1: # np.modf, np.frexp, np.divmod return tuple(_reconstruct(x) for x in result) return _reconstruct(result) def _reconstruct(result): if lib.is_scalar(result): return result if result.ndim != self.ndim: if method == "outer": raise NotImplementedError return result if isinstance(result, BlockManager): # we went through BlockManager.apply e.g. 
np.sqrt result = self._constructor(result, **reconstruct_kwargs, copy=False) else: # we converted an array, lost our axes result = self._constructor( result, **reconstruct_axes, **reconstruct_kwargs, copy=False ) # TODO: When we support multiple values in __finalize__, this # should pass alignable to `__finalize__` instead of self. # Then `np.add(a, b)` would consider attrs from both a and b # when a and b are NDFrames. if len(alignable) == 1: result = result.__finalize__(self) return result if "out" in kwargs: # e.g. test_multiindex_get_loc result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) return reconstruct(result) if method == "reduce": # e.g. test.series.test_ufunc.test_reduce result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result # We still get here with kwargs `axis` for e.g. np.maximum.accumulate # and `dtype` and `keepdims` for np.ptp if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1): # Just give up on preserving types in the complex case. # In theory we could preserve them for them. # * nout>1 is doable if BlockManager.apply took nout and # returned a Tuple[BlockManager]. # * len(inputs) > 1 is doable when we know that we have # aligned blocks / dtypes. # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add inputs = tuple(np.asarray(x) for x in inputs) # Note: we can't use default_array_ufunc here bc reindexing means # that `self` may not be among `inputs` result = getattr(ufunc, method)(*inputs, **kwargs) elif self.ndim == 1: # ufunc(series, ...) inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs) result = getattr(ufunc, method)(*inputs, **kwargs) else: # ufunc(dataframe) if method == "__call__" and not kwargs: # for np.<ufunc>(..) calls # kwargs cannot necessarily be handled block-by-block, so only # take this path if there are no kwargs mgr = inputs[0]._mgr result = mgr.apply(getattr(ufunc, method)) else: # otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..)) # Those can have an axis keyword and thus can't be called block-by-block result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs) # e.g. np.negative (only one reached), with "where" and "out" in kwargs result = reconstruct(result) return result
Compatibility with numpy ufuncs. See also -------- numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
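A brief, hedged illustration of the dispatch behavior described above (not taken from the source; it assumes a recent pandas release in which binary ufuncs align pandas inputs on their indexes):

import numpy as np
import pandas as pd

s = pd.Series([1.0, 4.0, 9.0], index=["a", "b", "c"])

# Unary ufuncs go through __array_ufunc__ and return a Series with the index preserved.
print(np.sqrt(s))

# Binary ufuncs with two pandas inputs are aligned first; labels missing from one
# operand produce NaN in the result, which is what the reindex logic above arranges.
t = pd.Series([1.0, 2.0], index=["b", "c"])
print(np.add(s, t))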
173,343
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `is_valid_positional_slice` function. Write a Python function `def is_valid_positional_slice(slc: slice) -> bool` to solve the following problem: Check if a slice object can be interpreted as a positional indexer. Parameters ---------- slc : slice Returns ------- bool Notes ----- A valid positional slice may also be interpreted as a label-based slice depending on the index being sliced. Here is the function: def is_valid_positional_slice(slc: slice) -> bool: """ Check if a slice object can be interpreted as a positional indexer. Parameters ---------- slc : slice Returns ------- bool Notes ----- A valid positional slice may also be interpreted as a label-based slice depending on the index being sliced. """ def is_int_or_none(val): return val is None or is_integer(val) return ( is_int_or_none(slc.start) and is_int_or_none(slc.stop) and is_int_or_none(slc.step) )
Check if a slice object can be interpreted as a positional indexer. Parameters ---------- slc : slice Returns ------- bool Notes ----- A valid positional slice may also be interpreted as a label-based slice depending on the index being sliced.
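A minimal usage sketch; the import path assumes this helper is re-exported from the internal pandas.core.indexers module, as recent pandas versions do for their indexer utilities:

from pandas.core.indexers import is_valid_positional_slice

# Integer (or None) bounds can be interpreted positionally.
print(is_valid_positional_slice(slice(0, 5, 2)))     # True
print(is_valid_positional_slice(slice(None, None)))  # True
# Label-based bounds cannot.
print(is_valid_positional_slice(slice("a", "c")))    # False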
173,344
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `is_scalar_indexer` function. Write a Python function `def is_scalar_indexer(indexer, ndim: int) -> bool` to solve the following problem: Return True if we are all scalar indexers. Parameters ---------- indexer : object ndim : int Number of dimensions in the object being indexed. Returns ------- bool Here is the function: def is_scalar_indexer(indexer, ndim: int) -> bool: """ Return True if we are all scalar indexers. Parameters ---------- indexer : object ndim : int Number of dimensions in the object being indexed. Returns ------- bool """ if ndim == 1 and is_integer(indexer): # GH37748: allow indexer to be an integer for Series return True if isinstance(indexer, tuple) and len(indexer) == ndim: return all(is_integer(x) for x in indexer) return False
Return True if we are all scalar indexers. Parameters ---------- indexer : object ndim : int Number of dimensions in the object being indexed. Returns ------- bool
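A minimal usage sketch, again assuming the helper can be imported from the internal pandas.core.indexers module:

from pandas.core.indexers import is_scalar_indexer

print(is_scalar_indexer(0, ndim=1))                 # True: single integer into a 1-D object
print(is_scalar_indexer((1, 2), ndim=2))            # True: one integer per dimension
print(is_scalar_indexer((1, slice(None)), ndim=2))  # False: a slice is not a scalar
print(is_scalar_indexer([0], ndim=1))               # False: list indexers are never scalar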
173,345
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `is_empty_indexer` function. Write a Python function `def is_empty_indexer(indexer) -> bool` to solve the following problem: Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool Here is the function: def is_empty_indexer(indexer) -> bool: """ Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool """ if is_list_like(indexer) and not len(indexer): return True if not isinstance(indexer, tuple): indexer = (indexer,) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool
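A short usage sketch under the same assumption about the internal pandas.core.indexers import path:

import numpy as np
from pandas.core.indexers import is_empty_indexer

print(is_empty_indexer([]))                       # True: zero-length list-like
print(is_empty_indexer(np.array([], dtype=int)))  # True: zero-length ndarray
print(is_empty_indexer(np.array([0, 1])))         # False
print(is_empty_indexer(slice(None)))              # False: a full slice is not empty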
173,346
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) def length_of_indexer(indexer, target=None) -> int: """ Return the expected length of target[indexer] Returns ------- int """ if target is not None and isinstance(indexer, slice): target_len = len(target) start = indexer.start stop = indexer.stop step = indexer.step if start is None: start = 0 elif start < 0: start += target_len if stop is None or stop > target_len: stop = target_len elif stop < 0: stop += target_len if step is None: step = 1 elif step < 0: start, stop = stop + 1, start + 1 step = -step return (stop - start + step - 1) // step elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)): if isinstance(indexer, list): indexer = np.array(indexer) if indexer.dtype == bool: # GH#25774 return indexer.sum() return len(indexer) elif isinstance(indexer, range): return (indexer.stop - indexer.start) // indexer.step elif not is_list_like_indexer(indexer): return 1 raise AssertionError("cannot find the length of the indexer") def array( data: Sequence[object] | AnyArrayLike, dtype: Dtype | None = None, copy: bool = True, ) -> ExtensionArray: """ Create an array. Parameters ---------- data : Sequence of objects The scalars inside `data` should be instances of the scalar type for `dtype`. It's expected that `data` represents a 1-dimensional array of data. When `data` is an Index or Series, the underlying array will be extracted from `data`. dtype : str, np.dtype, or ExtensionDtype, optional The dtype to use for the array. This may be a NumPy dtype or an extension type registered with pandas using :meth:`pandas.api.extensions.register_extension_dtype`. If not specified, there are two possibilities: 1. When `data` is a :class:`Series`, :class:`Index`, or :class:`ExtensionArray`, the `dtype` will be taken from the data. 2. Otherwise, pandas will attempt to infer the `dtype` from the data. Note that when `data` is a NumPy array, ``data.dtype`` is *not* used for inferring the array type. This is because NumPy cannot represent all the types of data that can be held in extension arrays. Currently, pandas will infer an extension dtype for sequences of ============================== ======================================= Scalar Type Array Type ============================== ======================================= :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` :class:`int` :class:`pandas.arrays.IntegerArray` :class:`float` :class:`pandas.arrays.FloatingArray` :class:`str` :class:`pandas.arrays.StringArray` or :class:`pandas.arrays.ArrowStringArray` :class:`bool` :class:`pandas.arrays.BooleanArray` ============================== ======================================= The ExtensionArray created when the scalar type is :class:`str` is determined by ``pd.options.mode.string_storage`` if the dtype is not explicitly given. For all other cases, NumPy's usual inference rules will be used. .. versionchanged:: 1.2.0 Pandas now also infers nullable-floating dtype for float-like input data copy : bool, default True Whether to copy the data, even if not necessary. 
Depending on the type of `data`, creating the new array may require copying data, even if ``copy=False``. Returns ------- ExtensionArray The newly created array. Raises ------ ValueError When `data` is not 1-dimensional. See Also -------- numpy.array : Construct a NumPy array. Series : Construct a pandas Series. Index : Construct a pandas Index. arrays.PandasArray : ExtensionArray wrapping a NumPy array. Series.array : Extract the array stored within a Series. Notes ----- Omitting the `dtype` argument means pandas will attempt to infer the best array type from the values in the data. As new array types are added by pandas and 3rd party libraries, the "best" array type may change. We recommend specifying `dtype` to ensure that 1. the correct array type for the data is returned 2. the returned array type doesn't change as new extension types are added by pandas and third-party libraries Additionally, if the underlying memory representation of the returned array matters, we recommend specifying the `dtype` as a concrete object rather than a string alias or allowing it to be inferred. For example, a future version of pandas or a 3rd-party library may include a dedicated ExtensionArray for string data. In this event, the following would no longer return a :class:`arrays.PandasArray` backed by a NumPy array. >>> pd.array(['a', 'b'], dtype=str) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 This would instead return the new ExtensionArray dedicated for string data. If you really need the new array to be backed by a NumPy array, specify that in the dtype. >>> pd.array(['a', 'b'], dtype=np.dtype("<U1")) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 Finally, Pandas has arrays that mostly overlap with NumPy * :class:`arrays.DatetimeArray` * :class:`arrays.TimedeltaArray` When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray`` rather than a ``PandasArray``. This is for symmetry with the case of timezone-aware data, which NumPy does not natively support. >>> pd.array(['2015', '2016'], dtype='datetime64[ns]') <DatetimeArray> ['2015-01-01 00:00:00', '2016-01-01 00:00:00'] Length: 2, dtype: datetime64[ns] >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]') <TimedeltaArray> ['0 days 01:00:00', '0 days 02:00:00'] Length: 2, dtype: timedelta64[ns] Examples -------- If a dtype is not specified, pandas will infer the best dtype from the values. See the description of `dtype` for the types pandas infers for. >>> pd.array([1, 2]) <IntegerArray> [1, 2] Length: 2, dtype: Int64 >>> pd.array([1, 2, np.nan]) <IntegerArray> [1, 2, <NA>] Length: 3, dtype: Int64 >>> pd.array([1.1, 2.2]) <FloatingArray> [1.1, 2.2] Length: 2, dtype: Float64 >>> pd.array(["a", None, "c"]) <StringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> with pd.option_context("string_storage", "pyarrow"): ... arr = pd.array(["a", None, "c"]) ... >>> arr <ArrowStringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")]) <PeriodArray> ['2000-01-01', '2000-01-01'] Length: 2, dtype: period[D] You can use the string alias for `dtype` >>> pd.array(['a', 'b', 'a'], dtype='category') ['a', 'b', 'a'] Categories (2, object): ['a', 'b'] Or specify the actual dtype >>> pd.array(['a', 'b', 'a'], ... 
dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True)) ['a', 'b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] If pandas does not infer a dedicated extension type a :class:`arrays.PandasArray` is returned. >>> pd.array([1 + 1j, 3 + 2j]) <PandasArray> [(1+1j), (3+2j)] Length: 2, dtype: complex128 As mentioned in the "Notes" section, new extension types may be added in the future (by pandas or 3rd party libraries), causing the return value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype` as a NumPy dtype if you need to ensure there's no future change in behavior. >>> pd.array([1, 2], dtype=np.dtype("int32")) <PandasArray> [1, 2] Length: 2, dtype: int32 `data` must be 1-dimensional. A ValueError is raised when the input has the wrong dimensionality. >>> pd.array(1) Traceback (most recent call last): ... ValueError: Cannot pass scalar '1' to 'pandas.array'. """ from pandas.core.arrays import ( BooleanArray, DatetimeArray, ExtensionArray, FloatingArray, IntegerArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.string_ import StringDtype if lib.is_scalar(data): msg = f"Cannot pass scalar '{data}' to 'pandas.array'." raise ValueError(msg) elif isinstance(data, ABCDataFrame): raise TypeError("Cannot pass DataFrame to 'pandas.array'") if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)): # Note: we exclude np.ndarray here, will do type inference on it dtype = data.dtype data = extract_array(data, extract_numpy=True) # this returns None for not-found dtypes. if isinstance(dtype, str): dtype = registry.find(dtype) or dtype if isinstance(data, ExtensionArray) and ( dtype is None or is_dtype_equal(dtype, data.dtype) ): # e.g. TimedeltaArray[s], avoid casting to PandasArray if copy: return data.copy() return data if is_extension_array_dtype(dtype): cls = cast(ExtensionDtype, dtype).construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) if dtype is None: inferred_dtype = lib.infer_dtype(data, skipna=True) if inferred_dtype == "period": period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data) return PeriodArray._from_sequence(period_data, copy=copy) elif inferred_dtype == "interval": return IntervalArray(data, copy=copy) elif inferred_dtype.startswith("datetime"): # datetime, datetime64 try: return DatetimeArray._from_sequence(data, copy=copy) except ValueError: # Mixture of timezones, fall back to PandasArray pass elif inferred_dtype.startswith("timedelta"): # timedelta, timedelta64 return TimedeltaArray._from_sequence(data, copy=copy) elif inferred_dtype == "string": # StringArray/ArrowStringArray depending on pd.options.mode.string_storage return StringDtype().construct_array_type()._from_sequence(data, copy=copy) elif inferred_dtype == "integer": return IntegerArray._from_sequence(data, copy=copy) elif ( inferred_dtype in ("floating", "mixed-integer-float") and getattr(data, "dtype", None) != np.float16 ): # GH#44715 Exclude np.float16 bc FloatingArray does not support it; # we will fall back to PandasArray. return FloatingArray._from_sequence(data, copy=copy) elif inferred_dtype == "boolean": return BooleanArray._from_sequence(data, copy=copy) # Pandas overrides NumPy for # 1. datetime64[ns] # 2. timedelta64[ns] # so that a DatetimeArray is returned. 
if is_datetime64_ns_dtype(dtype): return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) elif is_timedelta64_ns_dtype(dtype): return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) return PandasArray._from_sequence(data, dtype=dtype, copy=copy) The provided code snippet includes necessary dependencies for implementing the `check_setitem_lengths` function. Write a Python function `def check_setitem_lengths(indexer, value, values) -> bool` to solve the following problem: Validate that value and indexer are the same length. An special-case is allowed for when the indexer is a boolean array and the number of true values equals the length of ``value``. In this case, no exception is raised. Parameters ---------- indexer : sequence Key for the setitem. value : array-like Value for the setitem. values : array-like Values being set into. Returns ------- bool Whether this is an empty listlike setting which is a no-op. Raises ------ ValueError When the indexer is an ndarray or list and the lengths don't match. Here is the function: def check_setitem_lengths(indexer, value, values) -> bool: """ Validate that value and indexer are the same length. An special-case is allowed for when the indexer is a boolean array and the number of true values equals the length of ``value``. In this case, no exception is raised. Parameters ---------- indexer : sequence Key for the setitem. value : array-like Value for the setitem. values : array-like Values being set into. Returns ------- bool Whether this is an empty listlike setting which is a no-op. Raises ------ ValueError When the indexer is an ndarray or list and the lengths don't match. """ no_op = False if isinstance(indexer, (np.ndarray, list)): # We can ignore other listlikes because they are either # a) not necessarily 1-D indexers, e.g. tuple # b) boolean indexers e.g. BoolArray if is_list_like(value): if len(indexer) != len(value) and values.ndim == 1: # boolean with truth values == len of the value is ok too if isinstance(indexer, list): indexer = np.array(indexer) if not ( isinstance(indexer, np.ndarray) and indexer.dtype == np.bool_ and indexer.sum() == len(value) ): raise ValueError( "cannot set using a list-like indexer " "with a different length than the value" ) if not len(indexer): no_op = True elif isinstance(indexer, slice): if is_list_like(value): if len(value) != length_of_indexer(indexer, values) and values.ndim == 1: # In case of two dimensional value is used row-wise and broadcasted raise ValueError( "cannot set using a slice indexer with a " "different length than the value" ) if not len(value): no_op = True return no_op
Validate that value and indexer are the same length. A special case is allowed for when the indexer is a boolean array and the number of true values equals the length of ``value``. In this case, no exception is raised. Parameters ---------- indexer : sequence Key for the setitem. value : array-like Value for the setitem. values : array-like Values being set into. Returns ------- bool Whether this is an empty listlike setting which is a no-op. Raises ------ ValueError When the indexer is an ndarray or list and the lengths don't match.
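A short usage sketch of the three cases described above (assuming check_setitem_lengths is importable from the internal pandas.core.indexers module):

import numpy as np
from pandas.core.indexers import check_setitem_lengths

values = np.arange(5)

# Matching lengths: no error, and the setitem is not a no-op.
print(check_setitem_lengths(np.array([0, 1]), [10, 11], values))  # False

# A boolean mask whose True count equals len(value) is also accepted.
mask = np.array([True, False, True, False, False])
print(check_setitem_lengths(mask, [10, 11], values))               # False

# Mismatched lengths raise.
try:
    check_setitem_lengths(np.array([0, 1, 2]), [10, 11], values)
except ValueError as err:
    print(err)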
173,347
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `validate_indices` function. Write a Python function `def validate_indices(indices: np.ndarray, n: int) -> None` to solve the following problem: Perform bounds-checking for an indexer. -1 is allowed for indicating missing values. Parameters ---------- indices : ndarray n : int Length of the array being indexed. Raises ------ ValueError Examples -------- >>> validate_indices(np.array([1, 2]), 3) # OK >>> validate_indices(np.array([1, -2]), 3) Traceback (most recent call last): ... ValueError: negative dimensions are not allowed >>> validate_indices(np.array([1, 2, 3]), 3) Traceback (most recent call last): ... IndexError: indices are out-of-bounds >>> validate_indices(np.array([-1, -1]), 0) # OK >>> validate_indices(np.array([0, 1]), 0) Traceback (most recent call last): ... IndexError: indices are out-of-bounds Here is the function: def validate_indices(indices: np.ndarray, n: int) -> None: """ Perform bounds-checking for an indexer. -1 is allowed for indicating missing values. Parameters ---------- indices : ndarray n : int Length of the array being indexed. Raises ------ ValueError Examples -------- >>> validate_indices(np.array([1, 2]), 3) # OK >>> validate_indices(np.array([1, -2]), 3) Traceback (most recent call last): ... ValueError: negative dimensions are not allowed >>> validate_indices(np.array([1, 2, 3]), 3) Traceback (most recent call last): ... IndexError: indices are out-of-bounds >>> validate_indices(np.array([-1, -1]), 0) # OK >>> validate_indices(np.array([0, 1]), 0) Traceback (most recent call last): ... IndexError: indices are out-of-bounds """ if len(indices): min_idx = indices.min() if min_idx < -1: msg = f"'indices' contains values less than allowed ({min_idx} < -1)" raise ValueError(msg) max_idx = indices.max() if max_idx >= n: raise IndexError("indices are out-of-bounds")
Perform bounds-checking for an indexer. -1 is allowed for indicating missing values. Parameters ---------- indices : ndarray n : int Length of the array being indexed. Raises ------ ValueError Examples -------- >>> validate_indices(np.array([1, 2]), 3) # OK >>> validate_indices(np.array([1, -2]), 3) Traceback (most recent call last): ... ValueError: negative dimensions are not allowed >>> validate_indices(np.array([1, 2, 3]), 3) Traceback (most recent call last): ... IndexError: indices are out-of-bounds >>> validate_indices(np.array([-1, -1]), 0) # OK >>> validate_indices(np.array([0, 1]), 0) Traceback (most recent call last): ... IndexError: indices are out-of-bounds
173,348
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) def array( data: Sequence[object] | AnyArrayLike, dtype: Dtype | None = None, copy: bool = True, ) -> ExtensionArray: """ Create an array. Parameters ---------- data : Sequence of objects The scalars inside `data` should be instances of the scalar type for `dtype`. It's expected that `data` represents a 1-dimensional array of data. When `data` is an Index or Series, the underlying array will be extracted from `data`. dtype : str, np.dtype, or ExtensionDtype, optional The dtype to use for the array. This may be a NumPy dtype or an extension type registered with pandas using :meth:`pandas.api.extensions.register_extension_dtype`. If not specified, there are two possibilities: 1. When `data` is a :class:`Series`, :class:`Index`, or :class:`ExtensionArray`, the `dtype` will be taken from the data. 2. Otherwise, pandas will attempt to infer the `dtype` from the data. Note that when `data` is a NumPy array, ``data.dtype`` is *not* used for inferring the array type. This is because NumPy cannot represent all the types of data that can be held in extension arrays. Currently, pandas will infer an extension dtype for sequences of ============================== ======================================= Scalar Type Array Type ============================== ======================================= :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` :class:`int` :class:`pandas.arrays.IntegerArray` :class:`float` :class:`pandas.arrays.FloatingArray` :class:`str` :class:`pandas.arrays.StringArray` or :class:`pandas.arrays.ArrowStringArray` :class:`bool` :class:`pandas.arrays.BooleanArray` ============================== ======================================= The ExtensionArray created when the scalar type is :class:`str` is determined by ``pd.options.mode.string_storage`` if the dtype is not explicitly given. For all other cases, NumPy's usual inference rules will be used. .. versionchanged:: 1.2.0 Pandas now also infers nullable-floating dtype for float-like input data copy : bool, default True Whether to copy the data, even if not necessary. Depending on the type of `data`, creating the new array may require copying data, even if ``copy=False``. Returns ------- ExtensionArray The newly created array. Raises ------ ValueError When `data` is not 1-dimensional. See Also -------- numpy.array : Construct a NumPy array. Series : Construct a pandas Series. Index : Construct a pandas Index. arrays.PandasArray : ExtensionArray wrapping a NumPy array. Series.array : Extract the array stored within a Series. Notes ----- Omitting the `dtype` argument means pandas will attempt to infer the best array type from the values in the data. As new array types are added by pandas and 3rd party libraries, the "best" array type may change. We recommend specifying `dtype` to ensure that 1. the correct array type for the data is returned 2. 
the returned array type doesn't change as new extension types are added by pandas and third-party libraries Additionally, if the underlying memory representation of the returned array matters, we recommend specifying the `dtype` as a concrete object rather than a string alias or allowing it to be inferred. For example, a future version of pandas or a 3rd-party library may include a dedicated ExtensionArray for string data. In this event, the following would no longer return a :class:`arrays.PandasArray` backed by a NumPy array. >>> pd.array(['a', 'b'], dtype=str) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 This would instead return the new ExtensionArray dedicated for string data. If you really need the new array to be backed by a NumPy array, specify that in the dtype. >>> pd.array(['a', 'b'], dtype=np.dtype("<U1")) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 Finally, Pandas has arrays that mostly overlap with NumPy * :class:`arrays.DatetimeArray` * :class:`arrays.TimedeltaArray` When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray`` rather than a ``PandasArray``. This is for symmetry with the case of timezone-aware data, which NumPy does not natively support. >>> pd.array(['2015', '2016'], dtype='datetime64[ns]') <DatetimeArray> ['2015-01-01 00:00:00', '2016-01-01 00:00:00'] Length: 2, dtype: datetime64[ns] >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]') <TimedeltaArray> ['0 days 01:00:00', '0 days 02:00:00'] Length: 2, dtype: timedelta64[ns] Examples -------- If a dtype is not specified, pandas will infer the best dtype from the values. See the description of `dtype` for the types pandas infers for. >>> pd.array([1, 2]) <IntegerArray> [1, 2] Length: 2, dtype: Int64 >>> pd.array([1, 2, np.nan]) <IntegerArray> [1, 2, <NA>] Length: 3, dtype: Int64 >>> pd.array([1.1, 2.2]) <FloatingArray> [1.1, 2.2] Length: 2, dtype: Float64 >>> pd.array(["a", None, "c"]) <StringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> with pd.option_context("string_storage", "pyarrow"): ... arr = pd.array(["a", None, "c"]) ... >>> arr <ArrowStringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")]) <PeriodArray> ['2000-01-01', '2000-01-01'] Length: 2, dtype: period[D] You can use the string alias for `dtype` >>> pd.array(['a', 'b', 'a'], dtype='category') ['a', 'b', 'a'] Categories (2, object): ['a', 'b'] Or specify the actual dtype >>> pd.array(['a', 'b', 'a'], ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True)) ['a', 'b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] If pandas does not infer a dedicated extension type a :class:`arrays.PandasArray` is returned. >>> pd.array([1 + 1j, 3 + 2j]) <PandasArray> [(1+1j), (3+2j)] Length: 2, dtype: complex128 As mentioned in the "Notes" section, new extension types may be added in the future (by pandas or 3rd party libraries), causing the return value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype` as a NumPy dtype if you need to ensure there's no future change in behavior. >>> pd.array([1, 2], dtype=np.dtype("int32")) <PandasArray> [1, 2] Length: 2, dtype: int32 `data` must be 1-dimensional. A ValueError is raised when the input has the wrong dimensionality. >>> pd.array(1) Traceback (most recent call last): ... ValueError: Cannot pass scalar '1' to 'pandas.array'. 
""" from pandas.core.arrays import ( BooleanArray, DatetimeArray, ExtensionArray, FloatingArray, IntegerArray, IntervalArray, PandasArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.string_ import StringDtype if lib.is_scalar(data): msg = f"Cannot pass scalar '{data}' to 'pandas.array'." raise ValueError(msg) elif isinstance(data, ABCDataFrame): raise TypeError("Cannot pass DataFrame to 'pandas.array'") if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)): # Note: we exclude np.ndarray here, will do type inference on it dtype = data.dtype data = extract_array(data, extract_numpy=True) # this returns None for not-found dtypes. if isinstance(dtype, str): dtype = registry.find(dtype) or dtype if isinstance(data, ExtensionArray) and ( dtype is None or is_dtype_equal(dtype, data.dtype) ): # e.g. TimedeltaArray[s], avoid casting to PandasArray if copy: return data.copy() return data if is_extension_array_dtype(dtype): cls = cast(ExtensionDtype, dtype).construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) if dtype is None: inferred_dtype = lib.infer_dtype(data, skipna=True) if inferred_dtype == "period": period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data) return PeriodArray._from_sequence(period_data, copy=copy) elif inferred_dtype == "interval": return IntervalArray(data, copy=copy) elif inferred_dtype.startswith("datetime"): # datetime, datetime64 try: return DatetimeArray._from_sequence(data, copy=copy) except ValueError: # Mixture of timezones, fall back to PandasArray pass elif inferred_dtype.startswith("timedelta"): # timedelta, timedelta64 return TimedeltaArray._from_sequence(data, copy=copy) elif inferred_dtype == "string": # StringArray/ArrowStringArray depending on pd.options.mode.string_storage return StringDtype().construct_array_type()._from_sequence(data, copy=copy) elif inferred_dtype == "integer": return IntegerArray._from_sequence(data, copy=copy) elif ( inferred_dtype in ("floating", "mixed-integer-float") and getattr(data, "dtype", None) != np.float16 ): # GH#44715 Exclude np.float16 bc FloatingArray does not support it; # we will fall back to PandasArray. return FloatingArray._from_sequence(data, copy=copy) elif inferred_dtype == "boolean": return BooleanArray._from_sequence(data, copy=copy) # Pandas overrides NumPy for # 1. datetime64[ns] # 2. timedelta64[ns] # so that a DatetimeArray is returned. if is_datetime64_ns_dtype(dtype): return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) elif is_timedelta64_ns_dtype(dtype): return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) return PandasArray._from_sequence(data, dtype=dtype, copy=copy) The provided code snippet includes necessary dependencies for implementing the `maybe_convert_indices` function. Write a Python function `def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray` to solve the following problem: Attempt to convert indices into valid, positive indices. If we have negative indices, translate to positive here. If we have indices that are out-of-bounds, raise an IndexError. Parameters ---------- indices : array-like Array of indices that we are to convert. n : int Number of elements in the array that we are indexing. verify : bool, default True Check that all entries are between 0 and n - 1, inclusive. Returns ------- array-like An array-like of positive indices that correspond to the ones that were passed in initially to this function. 
Raises ------ IndexError One of the converted indices either exceeded the number of, elements (specified by `n`), or was still negative. Here is the function: def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray: """ Attempt to convert indices into valid, positive indices. If we have negative indices, translate to positive here. If we have indices that are out-of-bounds, raise an IndexError. Parameters ---------- indices : array-like Array of indices that we are to convert. n : int Number of elements in the array that we are indexing. verify : bool, default True Check that all entries are between 0 and n - 1, inclusive. Returns ------- array-like An array-like of positive indices that correspond to the ones that were passed in initially to this function. Raises ------ IndexError One of the converted indices either exceeded the number of, elements (specified by `n`), or was still negative. """ if isinstance(indices, list): indices = np.array(indices) if len(indices) == 0: # If `indices` is empty, np.array will return a float, # and will cause indexing errors. return np.empty(0, dtype=np.intp) mask = indices < 0 if mask.any(): indices = indices.copy() indices[mask] += n if verify: mask = (indices >= n) | (indices < 0) if mask.any(): raise IndexError("indices are out-of-bounds") return indices
Attempt to convert indices into valid, positive indices. If we have negative indices, translate to positive here. If we have indices that are out-of-bounds, raise an IndexError. Parameters ---------- indices : array-like Array of indices that we are to convert. n : int Number of elements in the array that we are indexing. verify : bool, default True Check that all entries are between 0 and n - 1, inclusive. Returns ------- array-like An array-like of positive indices that correspond to the ones that were passed in initially to this function. Raises ------ IndexError One of the converted indices either exceeded the number of elements (specified by `n`) or was still negative.
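A minimal usage sketch (import path assumed to be the internal pandas.core.indexers module):

import numpy as np
from pandas.core.indexers import maybe_convert_indices

# Negative indices are translated against the target length.
print(maybe_convert_indices(np.array([0, -1, -2]), n=5))  # [0 4 3]

# Out-of-bounds indices raise when verify=True (the default).
try:
    maybe_convert_indices(np.array([0, 5]), n=5)
except IndexError as err:
    print(err)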
173,349
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `disallow_ndim_indexing` function. Write a Python function `def disallow_ndim_indexing(result) -> None` to solve the following problem: Helper function to disallow multi-dimensional indexing on 1D Series/Index. GH#27125 indexer like idx[:, None] expands dim, but we cannot do that and keep an index, so we used to return ndarray, which was deprecated in GH#30588. Here is the function: def disallow_ndim_indexing(result) -> None: """ Helper function to disallow multi-dimensional indexing on 1D Series/Index. GH#27125 indexer like idx[:, None] expands dim, but we cannot do that and keep an index, so we used to return ndarray, which was deprecated in GH#30588. """ if np.ndim(result) > 1: raise ValueError( "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer " "supported. Convert to a numpy array before indexing instead." )
Helper function to disallow multi-dimensional indexing on 1D Series/Index. GH#27125 indexer like idx[:, None] expands dim, but we cannot do that and keep an index, so we used to return ndarray, which was deprecated in GH#30588.
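A small illustrative sketch; whether this helper is re-exported from pandas.core.indexers depends on the pandas version, so the import is an assumption:

import numpy as np
from pandas.core.indexers import disallow_ndim_indexing

disallow_ndim_indexing(np.array([1, 2, 3]))  # 1-D result: returns None, no error

try:
    disallow_ndim_indexing(np.array([[1, 2], [3, 4]]))  # 2-D result
except ValueError as err:
    print(err)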
173,350
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `unpack_1tuple` function. Write a Python function `def unpack_1tuple(tup)` to solve the following problem: If we have a length-1 tuple/list that contains a slice, unpack to just the slice. Notes ----- The list case is deprecated. Here is the function: def unpack_1tuple(tup): """ If we have a length-1 tuple/list that contains a slice, unpack to just the slice. Notes ----- The list case is deprecated. """ if len(tup) == 1 and isinstance(tup[0], slice): # if we don't have a MultiIndex, we may still be able to handle # a 1-tuple. see test_1tuple_without_multiindex if isinstance(tup, list): # GH#31299 raise ValueError( "Indexing with a single-item list containing a " "slice is not allowed. Pass a tuple instead.", ) return tup[0] return tup
If we have a length-1 tuple/list that contains a slice, unpack to just the slice. Notes ----- The list case is deprecated.
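A small illustrative sketch; as above, the pandas.core.indexers import path is an assumption about the internal module layout:

from pandas.core.indexers import unpack_1tuple

print(unpack_1tuple((slice(0, 3),)))  # slice(0, 3, None): the lone slice is unwrapped
print(unpack_1tuple((0,)))            # (0,): unchanged, the element is not a slice

# A single-item *list* containing a slice is rejected.
try:
    unpack_1tuple([slice(0, 3)])
except ValueError as err:
    print(err)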
173,351
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. 
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
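For instance, a duplicated column label would be replaced with a positional name such as ``_2`` in the yielded namedtuples (an illustrative sketch; verify the exact output against your pandas version): >>> df_dup = pd.DataFrame([[1, 2]], columns=['a', 'a']) >>> next(df_dup.itertuples())  # doctest: +SKIP Pandas(Index=0, a=1, _2=2)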
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Return the length of the info axis, which for a DataFrame is the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of another Series, DataFrame or a numpy array. It can also be called using ``self @ other``. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame or an np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method gives the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method also works if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex'") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
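* As a rough illustration of the dtype restriction above (hypothetical call, skipped in doctests), writing a ``category`` column is expected to raise ``NotImplementedError``: ``pd.DataFrame({"c": pd.Categorical(["a"])}).to_orc()  # doctest: +SKIP``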
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
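
        Accessing ``df.T`` is equivalent to calling ``df.transpose()`` with its
        default arguments, so the notes there about copies and mixed dtypes
        apply here as well.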
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
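        # (Illustrative, not exhaustive: a boolean Series such as
        #  df[df["a"] > 0], or a plain boolean list/array matching the number
        #  of rows, is dispatched to _getitem_bool_array below.)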
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
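
        Examples
        --------
        A minimal sketch of positional column assignment (values are
        illustrative only):

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(1, [30, 40])
        >>> df
           a   b
        0  1  30
        1  2  40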
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
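
        A hedged illustration of the alignment rule (labels chosen for this
        example only): assigning ``Series([1.0, 2.0], index=["b", "a"])`` to a
        frame indexed ``["a", "b", "c"]`` reorders the values to match the
        frame's index and fills the missing label ``"c"`` with NaN.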
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
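        For instance (column names assumed for illustration),
        ``df.query('A > 2 & B < 5')`` is evaluated as
        ``df[(df.A > 2) & (df.B < 5)]`` rather than following Python's native
        operator precedence.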
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
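
        Notes
        -----
        ``insert`` operates in place and returns ``None``. To keep the original
        frame unchanged, first take a copy, e.g. ``new = df.copy()`` followed by
        ``new.insert(0, "new_col", value)`` (names here are illustrative).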
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 """ ) **_shared_doc_kwargs, extended_summary_sub=" column or", axis_description_sub=", and 1 identifies the columns", see_also_sub=" or columns", ) ) # ---------------------------------------------------------------------- # Reindex-based selection methods # ---------------------------------------------------------------------- # Sorting # error: Signature of "sort_values" incompatible with supertype "NDFrame" # TODO: Just move the sort_values doc here. ) # ---------------------------------------------------------------------- # Arithmetic Methods ) ) ) # ---------------------------------------------------------------------- # Function application ) # error: Signature of "any" incompatible with supertype "NDFrame" [override] # error: Missing return statement ) # ---------------------------------------------------------------------- # Merging / joining methods # ---------------------------------------------------------------------- # Statistical methods, etc. # ---------------------------------------------------------------------- # ndarray-like stats methods # ---------------------------------------------------------------------- # Add index and columns # ---------------------------------------------------------------------- # Add plotting methods to DataFrame # ---------------------------------------------------------------------- # Internal Interface Methods DataFrame class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. 
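
    When ``tupleize_cols=True`` (the default) and every element of ``data`` is a
    tuple, the constructor returns a :class:`MultiIndex` instead; for example,
    ``pd.Index([("a", 1), ("b", 2)])`` produces a two-level MultiIndex.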
Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. 
_no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. 
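                    # (Illustrative: np.array([(1, 2), (3, 4)]) would become a 2-D
                    #  array of shape (2, 2); asarray_tuplesafe instead keeps a 1-D
                    #  object array whose two elements are the tuples.)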
data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. # See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. 
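
        A hedged illustration: an all-numeric object array such as
        ``np.array([1, 2], dtype=object)`` is inferred to an integer Index here,
        whereas mixed input such as ``np.array(["a", 1], dtype=object)`` keeps
        object dtype.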
""" result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. """ result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. 
""" self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. 
""" return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. 
""" def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. 
""" name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). """ attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. 
""" header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name is reused. 
>>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead." ) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. 
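        Returns
        -------
        list of Hashable
            The validated names. When ``names`` is not provided, falls back to
            the existing index name(s), or to ``default`` if the index is
            unnamed.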
Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. 
>>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. 
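        Raises
        ------
        IndexError
            If ``level`` is an integer other than 0 or -1.
        KeyError
            If ``level`` is a label that does not match the index name.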
""" if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or" "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name. """ if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." 
) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. 
Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.is_unique False >>> idx = pd.Index([1, 5, 7]) >>> idx.is_unique True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique False >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique True """ return self._engine.is_unique def has_duplicates(self) -> bool: """ Check if the Index has duplicate values. Returns ------- bool Whether or not the Index has duplicate values. See Also -------- Index.is_unique : Inverse method that checks if it has unique values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.has_duplicates True >>> idx = pd.Index([1, 5, 7]) >>> idx.has_duplicates False >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates True >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates False """ return not self.is_unique def is_boolean(self) -> bool: """ Check if the Index only consists of booleans. .. deprecated:: 2.0.0 Use `pandas.api.types.is_bool_dtype` instead. Returns ------- bool Whether or not the Index only consists of booleans. See Also -------- is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([True, False, True]) >>> idx.is_boolean() # doctest: +SKIP True >>> idx = pd.Index(["True", "False", "True"]) >>> idx.is_boolean() # doctest: +SKIP False >>> idx = pd.Index([True, False, "True"]) >>> idx.is_boolean() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_boolean is deprecated. " "Use pandas.api.types.is_bool_type instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["boolean"] def is_integer(self) -> bool: """ Check if the Index only consists of integers. .. deprecated:: 2.0.0 Use `pandas.api.types.is_integer_dtype` instead. Returns ------- bool Whether or not the Index only consists of integers. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_integer() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_integer() # doctest: +SKIP False >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_integer() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_integer is deprecated. " "Use pandas.api.types.is_integer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer"] def is_floating(self) -> bool: """ Check if the Index is a floating type. .. deprecated:: 2.0.0 Use `pandas.api.types.is_float_dtype` instead The Index may consist of only floats, NaNs, or a mix of floats, integers, or NaNs. 
Returns ------- bool Whether or not the Index only consists of only consists of floats, NaNs, or a mix of floats, integers, or NaNs. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4, np.nan]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_floating() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_floating is deprecated. " "Use pandas.api.types.is_float_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"] def is_numeric(self) -> bool: """ Check if the Index only consists of numeric data. .. deprecated:: 2.0.0 Use `pandas.api.types.is_numeric_dtype` instead. Returns ------- bool Whether or not the Index only consists of numeric data. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"]) >>> idx.is_numeric() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_numeric is deprecated. " "Use pandas.api.types.is_any_real_numeric_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer", "floating"] def is_object(self) -> bool: """ Check if the Index is of the object dtype. .. deprecated:: 2.0.0 Use `pandas.api.types.is_object_dtype` instead. Returns ------- bool Whether or not the Index is of the object dtype. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Apple", "Mango", 2.0]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... 
"Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. " "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. 
""" if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. 
Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-likes. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance, this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate an pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed. 
The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. 
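        Parameters
        ----------
        other : Index
        setop : str
            Name of the set operation being performed, e.g. "union".

        Returns
        -------
        tuple of Index
            ``(self, other)``, both converted to UTC when both are
            timezone-aware DatetimeIndexes; otherwise returned unchanged.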
""" # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. 
Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). 
Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
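        Examples
        --------
        An illustrative call with arbitrary integer labels:

        >>> left = pd.Index([1, 2, 2, 3])
        >>> right = pd.Index([2, 3, 4])
        >>> left._intersection_via_get_indexer(right, sort=False)
        array([2, 3])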
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
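As a hedged, added sketch (not part of the original docstring) of the
``method``/``tolerance`` combinations this validates, assuming a unique,
numeric, monotonic index:

>>> idx = pd.Index([10, 20, 30])
>>> idx.get_indexer([19, 36], method="nearest", tolerance=2)
array([ 1, -1])

Passing ``tolerance`` or ``limit`` without a fill method raises ``ValueError``,
as enforced below.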
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
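# Added illustrative sketch (hedged; not part of the original source) of the
# behaviour this guard produces instead of silently falling back to object dtype:
# >>> naive = pd.date_range("2020-01-01", periods=2)
# >>> aware = pd.date_range("2020-01-01", periods=2, tz="UTC")
# >>> naive.join(aware)
# Traceback (most recent call last):
# ...
# TypeError: Cannot join tz-naive with tz-aware DatetimeIndex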
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
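A hedged, added illustration: when ``other`` is left as ``None``, masked
positions become missing values, upcasting the dtype if needed:

>>> idx = pd.Index([1, 2, 3])
>>> idx.where(idx > 1)
Index([nan, 2.0, 3.0], dtype='float64')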
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
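# Added illustrative comment (hedged; not in the original source): the slice
# fastpath keeps the subclass, name and other metadata without copying data, e.g.
# >>> pd.Index([1, 2, 3], name="x")[1:]
# Index([2, 3], dtype='int64', name='x')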
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- np.ndarray[np.intp] Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ # This works for either ndarray or EA, is overridden # by RangeIndex, MultIIndex return self._data.argsort(*args, **kwargs) def _check_indexing_error(self, key): if not is_scalar(key): # if key is not a scalar, directly raise an error (the code below # would convert to numpy arrays and raise later any way) - GH29926 raise InvalidIndexError(key) def _should_fallback_to_positional(self) -> bool: """ Should an integer key be treated as positional? """ return self.inferred_type not in { "integer", "mixed-integer", "floating", "complex", } _index_shared_docs[ "get_indexer_non_unique" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s Returns ------- indexer : np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. missing : np.ndarray[np.intp] An indexer into the target of the values not found. These correspond to the -1 in the indexer array. Examples -------- >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['b', 'b']) (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64)) In the example below there are no matched values. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['q', 'r', 't']) (array([-1, -1, -1]), array([0, 1, 2])) For this reason, the returned ``indexer`` contains only integers equal to -1. It demonstrates that there's no match between the index and the ``target`` values at these positions. The mask [0, 1, 2] in the return value shows that the first, second, and third elements are missing. Notice that the return value is a tuple contains two items. In the example below the first item is an array of locations in ``index``. The second item is a mask shows that the first and third elements are missing. >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) >>> index.get_indexer_non_unique(['f', 'b', 's']) (array([-1, 1, 3, 4, -1]), array([0, 2])) """ def get_indexer_non_unique( self, target ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) target = self._maybe_cast_listlike_indexer(target) if not self._should_compare(target) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. return self._get_indexer_non_comparable(target, method=None, unique=False) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer_non_unique(ptarget) if not is_dtype_equal(self.dtype, target.dtype): # TODO: if object, could use infer_dtype to preempt costly # conversion if still non-comparable? 
dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) that = target.astype(dtype, copy=False) return this.get_indexer_non_unique(that) # TODO: get_indexer has fastpaths for both Categorical-self and # Categorical-target. Can we do something similar here? # Note: _maybe_promote ensures we never get here with MultiIndex # self and non-Multi target tgt_values = target._get_engine_target() if self._is_multi and target._is_multi: engine = self._engine # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has # no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr] indexer, missing = self._engine.get_indexer_non_unique(tgt_values) return ensure_platform_int(indexer), ensure_platform_int(missing) def get_indexer_for(self, target) -> npt.NDArray[np.intp]: """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. Returns ------- np.ndarray[np.intp] List of indices. Examples -------- >>> idx = pd.Index([np.nan, 'var1', np.nan]) >>> idx.get_indexer_for([np.nan]) array([0, 2]) """ if self._index_as_unique: return self.get_indexer(target) indexer, _ = self.get_indexer_non_unique(target) return indexer def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: """ Analogue to get_indexer that raises if any elements are missing. """ keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if self._index_as_unique: indexer = self.get_indexer_for(keyarr) keyarr = self.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr) self._raise_if_missing(keyarr, indexer, axis_name) keyarr = self.take(indexer) if isinstance(key, Index): # GH 42790 - Preserve name from an Index keyarr.name = key.name if keyarr.dtype.kind in ["m", "M"]: # DTI/TDI.take can infer a freq in some cases when we dont want one if isinstance(key, list) or ( isinstance(key, type(self)) # "Index" has no attribute "freq" and key.freq is None # type: ignore[attr-defined] ): keyarr = keyarr._with_freq(None) return keyarr, indexer def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: # TODO: remove special-case; this is just to keep exception # message tests from raising while debugging use_interval_msg = is_interval_dtype(self.dtype) or ( is_categorical_dtype(self.dtype) # "Index" has no attribute "categories" [attr-defined] and is_interval_dtype( self.categories.dtype # type: ignore[attr-defined] ) ) if nmissing == len(indexer): if use_interval_msg: key = list(key) raise KeyError(f"None of [{key}] are in the [{axis_name}]") not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f"{not_found} not in index") def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[True] = ... ) -> npt.NDArray[np.intp]: ... 
def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None. """ if method is not None: other = _unpack_nested_dtype(target) raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: # This is for get_indexer return no_matches else: # This is for get_indexer_non_unique missing = np.arange(len(target), dtype=np.intp) return no_matches, missing def _index_as_unique(self) -> bool: """ Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat. """ return self.is_unique _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" def _maybe_promote(self, other: Index) -> tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. """ if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if ( self.tz is not None and other.tz is not None and not tz_compare(self.tz, other.tz) ): # standardize on UTC return self.tz_convert("UTC"), other.tz_convert("UTC") elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): try: return type(other)(self), other except OutOfBoundsDatetime: return self, other elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): # TODO: we dont have tests that get here return type(other)(self), other elif self.dtype.kind == "u" and other.dtype.kind == "i": # GH#41873 if other.min() >= 0: # lookup min as it may be cached # TODO: may need itemsize check if we have non-64-bit Indexes return self, other.astype(self.dtype) elif self._is_multi and not other._is_multi: try: # "Type[Index]" has no attribute "from_tuples" other = type(self).from_tuples(other) # type: ignore[attr-defined] except (TypeError, ValueError): # let's instead try with a straight Index self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): # Reverse op so we dont need to re-implement on the subclasses other, self = other._maybe_promote(self) return self, other def _find_common_type_compat(self, target) -> DtypeObj: """ Implementation of find_common_type that adjusts for Index-specific special cases. 
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. 
step : int, default None Returns ------- slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples -------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3, None) """ start_slice, end_slice = self.slice_locs(start, end, step=step) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step) def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ return key def _maybe_cast_listlike_indexer(self, target) -> Index: """ Analogue to maybe_cast_indexer for get_indexer instead of get_loc. """ return ensure_index(target) def _validate_indexer(self, form: str_t, key, kind: str_t) -> None: """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ assert kind in ["getitem", "iloc"] if key is not None and not is_integer(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ # We are a plain index here (sub-class override this method if they # wish to have special treatment for floats/ints, e.g. datetimelike Indexes if is_numeric_dtype(self.dtype): return self._maybe_cast_indexer(label) # reject them, if index does not contain label if (is_float(label) or is_integer(label)) and label not in self: self._raise_invalid_indexer("slice", label) return label def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: # np.searchsorted expects ascending sort order, have to reverse # everything for it to work (element ordering, search side and # resulting value). pos = self[::-1].searchsorted( label, side="right" if side == "left" else "left" ) return len(self) - pos raise ValueError("index must be monotonic increasing or decreasing") def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} Returns ------- int Index of label. """ if side not in ("left", "right"): raise ValueError( "Invalid value for side kwarg, must be either " f"'left' or 'right': {side}" ) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. 
label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " f"label: {repr(original_label)}" ) if isinstance(slc, slice): if side == "left": return slc.start else: return slc.stop else: if side == "right": return slc + 1 else: return slc def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, defaults None If None, defaults to 1. Returns ------- tuple[int, int] See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples -------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = step is None or step >= 0 if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice def delete(self: _IndexT, loc) -> _IndexT: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) The provided code snippet includes necessary dependencies for implementing the `check_key_length` function. Write a Python function `def check_key_length(columns: Index, key, value: DataFrame) -> None` to solve the following problem: Checks if a key used as indexer has the same length as the columns it is associated with. Parameters ---------- columns : Index The columns of the DataFrame to index. key : A list-like of keys to index with. value : DataFrame The value to set for the keys. Raises ------ ValueError: If the length of key is not equal to the number of columns in value or if the number of columns referenced by key is not equal to number of columns. Here is the function: def check_key_length(columns: Index, key, value: DataFrame) -> None: """ Checks if a key used as indexer has the same length as the columns it is associated with. Parameters ---------- columns : Index The columns of the DataFrame to index. key : A list-like of keys to index with. value : DataFrame The value to set for the keys. Raises ------ ValueError: If the length of key is not equal to the number of columns in value or if the number of columns referenced by key is not equal to number of columns. 
""" if columns.is_unique: if len(value.columns) != len(key): raise ValueError("Columns must be same length as key") else: # Missing keys in columns are represented as -1 if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns): raise ValueError("Columns must be same length as key")
Checks if a key used as an indexer has the same length as the columns it is associated with. Parameters ---------- columns : Index The columns of the DataFrame to index. key : A list-like of keys to index with. value : DataFrame The value to set for the keys. Raises ------ ValueError: If the length of key is not equal to the number of columns in value, or if the number of columns referenced by key is not equal to the number of columns in value.
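A brief usage sketch of the check performed by check_key_length, exercised through the public DataFrame setitem path; the frames and column names below are illustrative, not taken from the source.

import pandas as pd

# Illustrative frames: a two-column key receiving a three-column value.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
wider = pd.DataFrame({"x": [5, 6], "y": [7, 8], "z": [9, 10]})

# The internal length check rejects the assignment because the key names
# two columns while the value supplies three.
try:
    df[["a", "b"]] = wider
except ValueError as err:
    print(err)  # Columns must be same length as key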
173,352
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `unpack_tuple_and_ellipses` function. Write a Python function `def unpack_tuple_and_ellipses(item: tuple)` to solve the following problem: Possibly unpack arr[..., n] to arr[n] Here is the function: def unpack_tuple_and_ellipses(item: tuple): """ Possibly unpack arr[..., n] to arr[n] """ if len(item) > 1: # Note: we are assuming this indexing is being done on a 1D arraylike if item[0] is Ellipsis: item = item[1:] elif item[-1] is Ellipsis: item = item[:-1] if len(item) > 1: raise IndexError("too many indices for array.") item = item[0] return item
Possibly unpack arr[..., n] to arr[n]
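A minimal sketch of the helper's behavior, calling the definition shown above directly (it is an internal helper, not a public API); the tuples are illustrative.

# arr[..., n]-style tuples collapse to the single real index.
print(unpack_tuple_and_ellipses((Ellipsis, 3)))  # 3

# A one-element tuple is unwrapped as well.
print(unpack_tuple_and_ellipses((3,)))  # 3

# Two real indices are rejected, since the target is assumed 1-dimensional:
# unpack_tuple_and_ellipses((0, 1)) raises IndexError("too many indices for array.")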
173,353
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, ) import numpy as np from pandas._typing import AnyArrayLike from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) Any = object() AnyArrayLike = Union[ArrayLike, "Index", "Series"] def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_bool_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> from pandas.api.types import is_bool_dtype >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndex): # Allow Index[object] that is all-bools or Index["boolean"] return arr_or_dtype.inferred_type == "boolean" elif isinstance(dtype, ExtensionDtype): return getattr(dtype, "_is_boolean", False) return issubclass(dtype.type, np.bool_) def is_extension_array_dtype(arr_or_dtype) -> bool: """ Check if an object is a pandas extension array type. See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object For array-like input, the ``.dtype`` attribute will be extracted. Returns ------- bool Whether the `arr_or_dtype` is an extension array type. Notes ----- This checks whether an object implements the pandas extension array interface. 
In pandas, this includes: * Categorical * Sparse * Interval * Period * DatetimeArray * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. Examples -------- >>> from pandas.api.types import is_extension_array_dtype >>> arr = pd.Categorical(['a', 'b']) >>> is_extension_array_dtype(arr) True >>> is_extension_array_dtype(arr.dtype) True >>> arr = np.array(['a', 'b']) >>> is_extension_array_dtype(arr.dtype) False """ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) if isinstance(dtype, ExtensionDtype): return True elif isinstance(dtype, np.dtype): return False else: return registry.find(dtype) is not None The provided code snippet includes necessary dependencies for implementing the `check_array_indexer` function. Write a Python function `def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any` to solve the following problem: Check if `indexer` is a valid array indexer for `array`. For a boolean mask, `array` and `indexer` are checked to have the same length. The dtype is validated, and if it is an integer or boolean ExtensionArray, it is checked if there are missing values present, and it is converted to the appropriate numpy array. Other dtypes will raise an error. Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed through as is. Parameters ---------- array : array-like The array that is being indexed (only used for the length). indexer : array-like or list-like The array-like that's used to index. List-like input that is not yet a numpy array or an ExtensionArray is converted to one. Other input types are passed through as is. Returns ------- numpy.ndarray The validated indexer as a numpy array that can be used to index. Raises ------ IndexError When the lengths don't match. ValueError When `indexer` cannot be converted to a numpy ndarray to index (e.g. presence of missing values). See Also -------- api.types.is_bool_dtype : Check if `key` is of boolean dtype. Examples -------- When checking a boolean mask, a boolean ndarray is returned when the arguments are all valid. >>> mask = pd.array([True, False]) >>> arr = pd.array([1, 2]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) An IndexError is raised when the lengths don't match. >>> mask = pd.array([True, False, True]) >>> pd.api.indexers.check_array_indexer(arr, mask) Traceback (most recent call last): ... IndexError: Boolean index has wrong length: 3 instead of 2. NA values in a boolean array are treated as False. >>> mask = pd.array([True, pd.NA]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) A numpy boolean mask will get passed through (if the length is correct): >>> mask = np.array([True, False]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) Similarly for integer indexers, an integer ndarray is returned when it is a valid indexer, otherwise an error is (for integer indexers, a matching length is not required): >>> indexer = pd.array([0, 2], dtype="Int64") >>> arr = pd.array([1, 2, 3]) >>> pd.api.indexers.check_array_indexer(arr, indexer) array([0, 2]) >>> indexer = pd.array([0, pd.NA], dtype="Int64") >>> pd.api.indexers.check_array_indexer(arr, indexer) Traceback (most recent call last): ... ValueError: Cannot index with an integer indexer containing NA values For non-integer/boolean dtypes, an appropriate error is raised: >>> indexer = np.array([0., 2.], dtype="float64") >>> pd.api.indexers.check_array_indexer(arr, indexer) Traceback (most recent call last): ... 
IndexError: arrays used as indices must be of integer or boolean type Here is the function: def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: """ Check if `indexer` is a valid array indexer for `array`. For a boolean mask, `array` and `indexer` are checked to have the same length. The dtype is validated, and if it is an integer or boolean ExtensionArray, it is checked if there are missing values present, and it is converted to the appropriate numpy array. Other dtypes will raise an error. Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed through as is. Parameters ---------- array : array-like The array that is being indexed (only used for the length). indexer : array-like or list-like The array-like that's used to index. List-like input that is not yet a numpy array or an ExtensionArray is converted to one. Other input types are passed through as is. Returns ------- numpy.ndarray The validated indexer as a numpy array that can be used to index. Raises ------ IndexError When the lengths don't match. ValueError When `indexer` cannot be converted to a numpy ndarray to index (e.g. presence of missing values). See Also -------- api.types.is_bool_dtype : Check if `key` is of boolean dtype. Examples -------- When checking a boolean mask, a boolean ndarray is returned when the arguments are all valid. >>> mask = pd.array([True, False]) >>> arr = pd.array([1, 2]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) An IndexError is raised when the lengths don't match. >>> mask = pd.array([True, False, True]) >>> pd.api.indexers.check_array_indexer(arr, mask) Traceback (most recent call last): ... IndexError: Boolean index has wrong length: 3 instead of 2. NA values in a boolean array are treated as False. >>> mask = pd.array([True, pd.NA]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) A numpy boolean mask will get passed through (if the length is correct): >>> mask = np.array([True, False]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) Similarly for integer indexers, an integer ndarray is returned when it is a valid indexer, otherwise an error is (for integer indexers, a matching length is not required): >>> indexer = pd.array([0, 2], dtype="Int64") >>> arr = pd.array([1, 2, 3]) >>> pd.api.indexers.check_array_indexer(arr, indexer) array([0, 2]) >>> indexer = pd.array([0, pd.NA], dtype="Int64") >>> pd.api.indexers.check_array_indexer(arr, indexer) Traceback (most recent call last): ... ValueError: Cannot index with an integer indexer containing NA values For non-integer/boolean dtypes, an appropriate error is raised: >>> indexer = np.array([0., 2.], dtype="float64") >>> pd.api.indexers.check_array_indexer(arr, indexer) Traceback (most recent call last): ... 
IndexError: arrays used as indices must be of integer or boolean type """ from pandas.core.construction import array as pd_array # whatever is not an array-like is returned as-is (possible valid array # indexers that are not array-like: integer, slice, Ellipsis, None) # In this context, tuples are not considered as array-like, as they have # a specific meaning in indexing (multi-dimensional indexing) if is_list_like(indexer): if isinstance(indexer, tuple): return indexer else: return indexer # convert list-likes to array if not is_array_like(indexer): indexer = pd_array(indexer) if len(indexer) == 0: # empty list is converted to float array by pd.array indexer = np.array([], dtype=np.intp) dtype = indexer.dtype if is_bool_dtype(dtype): if is_extension_array_dtype(dtype): indexer = indexer.to_numpy(dtype=bool, na_value=False) else: indexer = np.asarray(indexer, dtype=bool) # GH26658 if len(indexer) != len(array): raise IndexError( f"Boolean index has wrong length: " f"{len(indexer)} instead of {len(array)}" ) elif is_integer_dtype(dtype): try: indexer = np.asarray(indexer, dtype=np.intp) except ValueError as err: raise ValueError( "Cannot index with an integer indexer containing NA values" ) from err else: raise IndexError("arrays used as indices must be of integer or boolean type") return indexer
Check if `indexer` is a valid array indexer for `array`. For a boolean mask, `array` and `indexer` are checked to have the same length. The dtype is validated, and if it is an integer or boolean ExtensionArray, it is checked if there are missing values present, and it is converted to the appropriate numpy array. Other dtypes will raise an error. Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed through as is. Parameters ---------- array : array-like The array that is being indexed (only used for the length). indexer : array-like or list-like The array-like that's used to index. List-like input that is not yet a numpy array or an ExtensionArray is converted to one. Other input types are passed through as is. Returns ------- numpy.ndarray The validated indexer as a numpy array that can be used to index. Raises ------ IndexError When the lengths don't match. ValueError When `indexer` cannot be converted to a numpy ndarray to index (e.g. presence of missing values). See Also -------- api.types.is_bool_dtype : Check if `key` is of boolean dtype. Examples -------- When checking a boolean mask, a boolean ndarray is returned when the arguments are all valid. >>> mask = pd.array([True, False]) >>> arr = pd.array([1, 2]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) An IndexError is raised when the lengths don't match. >>> mask = pd.array([True, False, True]) >>> pd.api.indexers.check_array_indexer(arr, mask) Traceback (most recent call last): ... IndexError: Boolean index has wrong length: 3 instead of 2. NA values in a boolean array are treated as False. >>> mask = pd.array([True, pd.NA]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) A numpy boolean mask will get passed through (if the length is correct): >>> mask = np.array([True, False]) >>> pd.api.indexers.check_array_indexer(arr, mask) array([ True, False]) Similarly for integer indexers, an integer ndarray is returned when it is a valid indexer, otherwise an error is (for integer indexers, a matching length is not required): >>> indexer = pd.array([0, 2], dtype="Int64") >>> arr = pd.array([1, 2, 3]) >>> pd.api.indexers.check_array_indexer(arr, indexer) array([0, 2]) >>> indexer = pd.array([0, pd.NA], dtype="Int64") >>> pd.api.indexers.check_array_indexer(arr, indexer) Traceback (most recent call last): ... ValueError: Cannot index with an integer indexer containing NA values For non-integer/boolean dtypes, an appropriate error is raised: >>> indexer = np.array([0., 2.], dtype="float64") >>> pd.api.indexers.check_array_indexer(arr, indexer) Traceback (most recent call last): ... IndexError: arrays used as indices must be of integer or boolean type
173,354
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) with cf.config_prefix("compute"): cf.register_option( "use_bottleneck", True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb, ) cf.register_option( "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) cf.register_option( "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb ) with cf.config_prefix("display"): cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) cf.register_option( "float_format", None, float_format_doc, validator=is_one_of_factory([None, is_callable]), ) cf.register_option( "max_info_rows", 1690785, pc_max_info_rows_doc, validator=is_instance_factory((int, type(None))), ) cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) cf.register_option( "min_rows", 10, pc_min_rows_doc, validator=is_instance_factory([type(None), int]), ) cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) cf.register_option( "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int, ) if is_terminal(): max_cols = 0 # automatically determine optimal number of columns else: max_cols = 20 # cannot determine optimal number of columns cf.register_option( "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int ) cf.register_option( "large_repr", "truncate", pc_large_repr_doc, validator=is_one_of_factory(["truncate", "info"]), ) cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int) cf.register_option( "colheader_justify", "right", colheader_justify_doc, validator=is_text ) cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool) cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int) cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool) cf.register_option("expand_frame_repr", True, pc_expand_repr_doc) cf.register_option( "show_dimensions", "truncate", pc_show_dimensions_doc, validator=is_one_of_factory([True, False, "truncate"]), ) cf.register_option("chop_threshold", None, pc_chop_threshold_doc) cf.register_option("max_seq_items", 100, pc_max_seq_items) cf.register_option( "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int]) ) cf.register_option( "memory_usage", True, pc_memory_usage_doc, validator=is_one_of_factory([None, True, False, "deep"]), ) cf.register_option( "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "html.table_schema", False, pc_table_schema_doc, validator=is_bool, cb=table_schema_cb, ) cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int) cf.register_option( "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool ) cf.register_option( "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int ) with cf.config_prefix("mode"): cf.register_option("sim_interactive", False, tc_sim_interactive_doc) with cf.config_prefix("mode"): cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb) with cf.config_prefix("mode"): cf.register_option( "data_manager", # Get the default from an environment variable, if set, otherwise defaults # to 
"block". This environment variable can be set for testing. os.environ.get("PANDAS_DATA_MANAGER", "block"), data_manager_doc, validator=is_one_of_factory(["block", "array"]), ) with cf.config_prefix("mode"): cf.register_option( "copy_on_write", # Get the default from an environment variable, if set, otherwise defaults # to False. This environment variable can be set for testing. os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1", copy_on_write_doc, validator=is_bool, ) with cf.config_prefix("mode"): cf.register_option( "chained_assignment", "warn", chained_assignment, validator=is_one_of_factory([None, "warn", "raise"]), ) with cf.config_prefix("mode"): cf.register_option( "string_storage", "python", string_storage_doc, validator=is_one_of_factory(["python", "pyarrow"]), ) with cf.config_prefix("io.excel.xls"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)), validator=is_one_of_factory(_xls_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=is_one_of_factory(_xlsm_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=is_one_of_factory(_xlsx_options + ["auto"]), ) with cf.config_prefix("io.excel.ods"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=is_one_of_factory(_ods_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsb"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)), validator=is_one_of_factory(_xlsb_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=str, ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=str, ) with cf.config_prefix("io.excel.ods"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=str, ) with cf.config_prefix("io.parquet"): cf.register_option( "engine", "auto", parquet_engine_doc, validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), ) with cf.config_prefix("io.sql"): cf.register_option( "engine", "auto", sql_engine_doc, validator=is_one_of_factory(["auto", "sqlalchemy"]), ) with cf.config_prefix("plotting"): cf.register_option( "backend", defval="matplotlib", doc=plotting_backend_doc, validator=register_plotting_backend_cb, ) with cf.config_prefix("plotting.matplotlib"): cf.register_option( "register_converters", "auto", register_converter_doc, validator=is_one_of_factory(["auto", True, False]), cb=register_converter_cb, ) with cf.config_prefix("styler"): cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) cf.register_option( "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool ) cf.register_option( "render.repr", "html", styler_render_repr, validator=is_one_of_factory(["html", "latex"]), ) cf.register_option( "render.max_elements", 2**18, styler_max_elements, validator=is_nonnegative_int, ) cf.register_option( "render.max_rows", None, styler_max_rows, validator=is_nonnegative_int, ) cf.register_option( "render.max_columns", None, styler_max_columns, 
validator=is_nonnegative_int, ) cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) cf.register_option( "format.precision", 6, styler_precision, validator=is_nonnegative_int ) cf.register_option( "format.thousands", None, styler_thousands, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.na_rep", None, styler_na_rep, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.escape", None, styler_escape, validator=is_one_of_factory([None, "html", "latex"]), ) cf.register_option( "format.formatter", None, styler_formatter, validator=is_instance_factory([type(None), dict, Callable, str]), ) cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) cf.register_option( "latex.multirow_align", "c", styler_multirow_align, validator=is_one_of_factory(["c", "t", "b", "naive"]), ) val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] val_mca += ["naive-l", "naive-r"] cf.register_option( "latex.multicol_align", "r", styler_multicol_align, validator=is_one_of_factory(val_mca), ) cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) cf.register_option( "latex.environment", None, styler_environment, validator=is_instance_factory([type(None), str]), ) def use_bottleneck_cb(key) -> None: from pandas.core import nanops nanops.set_use_bottleneck(cf.get_option(key))
null
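A minimal usage sketch for the option wired to use_bottleneck_cb in the record above, assuming only that pandas is installed: changing "compute.use_bottleneck" fires the callback, which forwards the new value to nanops.set_use_bottleneck.

import pandas as pd

pd.set_option("compute.use_bottleneck", False)   # callback disables bottleneck-backed reductions
print(pd.get_option("compute.use_bottleneck"))   # False
pd.set_option("compute.use_bottleneck", True)    # restore the registered default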
173,355
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) with cf.config_prefix("compute"): cf.register_option( "use_bottleneck", True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb, ) cf.register_option( "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) cf.register_option( "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb ) with cf.config_prefix("display"): cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) cf.register_option( "float_format", None, float_format_doc, validator=is_one_of_factory([None, is_callable]), ) cf.register_option( "max_info_rows", 1690785, pc_max_info_rows_doc, validator=is_instance_factory((int, type(None))), ) cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) cf.register_option( "min_rows", 10, pc_min_rows_doc, validator=is_instance_factory([type(None), int]), ) cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) cf.register_option( "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int, ) if is_terminal(): max_cols = 0 # automatically determine optimal number of columns else: max_cols = 20 # cannot determine optimal number of columns cf.register_option( "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int ) cf.register_option( "large_repr", "truncate", pc_large_repr_doc, validator=is_one_of_factory(["truncate", "info"]), ) cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int) cf.register_option( "colheader_justify", "right", colheader_justify_doc, validator=is_text ) cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool) cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int) cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool) cf.register_option("expand_frame_repr", True, pc_expand_repr_doc) cf.register_option( "show_dimensions", "truncate", pc_show_dimensions_doc, validator=is_one_of_factory([True, False, "truncate"]), ) cf.register_option("chop_threshold", None, pc_chop_threshold_doc) cf.register_option("max_seq_items", 100, pc_max_seq_items) cf.register_option( "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int]) ) cf.register_option( "memory_usage", True, pc_memory_usage_doc, validator=is_one_of_factory([None, True, False, "deep"]), ) cf.register_option( "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "html.table_schema", False, pc_table_schema_doc, validator=is_bool, cb=table_schema_cb, ) cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int) cf.register_option( "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool ) cf.register_option( "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int ) with cf.config_prefix("mode"): cf.register_option("sim_interactive", False, tc_sim_interactive_doc) with cf.config_prefix("mode"): cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb) with cf.config_prefix("mode"): cf.register_option( "data_manager", # Get the default from an environment variable, if set, otherwise defaults # to 
"block". This environment variable can be set for testing. os.environ.get("PANDAS_DATA_MANAGER", "block"), data_manager_doc, validator=is_one_of_factory(["block", "array"]), ) with cf.config_prefix("mode"): cf.register_option( "copy_on_write", # Get the default from an environment variable, if set, otherwise defaults # to False. This environment variable can be set for testing. os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1", copy_on_write_doc, validator=is_bool, ) with cf.config_prefix("mode"): cf.register_option( "chained_assignment", "warn", chained_assignment, validator=is_one_of_factory([None, "warn", "raise"]), ) with cf.config_prefix("mode"): cf.register_option( "string_storage", "python", string_storage_doc, validator=is_one_of_factory(["python", "pyarrow"]), ) with cf.config_prefix("io.excel.xls"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)), validator=is_one_of_factory(_xls_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=is_one_of_factory(_xlsm_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=is_one_of_factory(_xlsx_options + ["auto"]), ) with cf.config_prefix("io.excel.ods"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=is_one_of_factory(_ods_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsb"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)), validator=is_one_of_factory(_xlsb_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=str, ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=str, ) with cf.config_prefix("io.excel.ods"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=str, ) with cf.config_prefix("io.parquet"): cf.register_option( "engine", "auto", parquet_engine_doc, validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), ) with cf.config_prefix("io.sql"): cf.register_option( "engine", "auto", sql_engine_doc, validator=is_one_of_factory(["auto", "sqlalchemy"]), ) with cf.config_prefix("plotting"): cf.register_option( "backend", defval="matplotlib", doc=plotting_backend_doc, validator=register_plotting_backend_cb, ) with cf.config_prefix("plotting.matplotlib"): cf.register_option( "register_converters", "auto", register_converter_doc, validator=is_one_of_factory(["auto", True, False]), cb=register_converter_cb, ) with cf.config_prefix("styler"): cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) cf.register_option( "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool ) cf.register_option( "render.repr", "html", styler_render_repr, validator=is_one_of_factory(["html", "latex"]), ) cf.register_option( "render.max_elements", 2**18, styler_max_elements, validator=is_nonnegative_int, ) cf.register_option( "render.max_rows", None, styler_max_rows, validator=is_nonnegative_int, ) cf.register_option( "render.max_columns", None, styler_max_columns, 
validator=is_nonnegative_int, ) cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) cf.register_option( "format.precision", 6, styler_precision, validator=is_nonnegative_int ) cf.register_option( "format.thousands", None, styler_thousands, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.na_rep", None, styler_na_rep, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.escape", None, styler_escape, validator=is_one_of_factory([None, "html", "latex"]), ) cf.register_option( "format.formatter", None, styler_formatter, validator=is_instance_factory([type(None), dict, Callable, str]), ) cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) cf.register_option( "latex.multirow_align", "c", styler_multirow_align, validator=is_one_of_factory(["c", "t", "b", "naive"]), ) val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] val_mca += ["naive-l", "naive-r"] cf.register_option( "latex.multicol_align", "r", styler_multicol_align, validator=is_one_of_factory(val_mca), ) cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) cf.register_option( "latex.environment", None, styler_environment, validator=is_instance_factory([type(None), str]), ) def use_numexpr_cb(key) -> None: from pandas.core.computation import expressions expressions.set_use_numexpr(cf.get_option(key))
null
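A hedged sketch for the "compute.use_numexpr" option registered with use_numexpr_cb above; option_context restores the previous value on exit, so the numexpr engine is only bypassed inside the block.

import pandas as pd

with pd.option_context("compute.use_numexpr", False):
    df = pd.DataFrame({"a": range(5)})
    _ = df["a"] + 1                               # evaluated without numexpr inside this block
print(pd.get_option("compute.use_numexpr"))       # back to the prior setting (True by default)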
173,356
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) with cf.config_prefix("compute"): cf.register_option( "use_bottleneck", True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb, ) cf.register_option( "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) cf.register_option( "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb ) with cf.config_prefix("display"): cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) cf.register_option( "float_format", None, float_format_doc, validator=is_one_of_factory([None, is_callable]), ) cf.register_option( "max_info_rows", 1690785, pc_max_info_rows_doc, validator=is_instance_factory((int, type(None))), ) cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) cf.register_option( "min_rows", 10, pc_min_rows_doc, validator=is_instance_factory([type(None), int]), ) cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) cf.register_option( "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int, ) if is_terminal(): max_cols = 0 # automatically determine optimal number of columns else: max_cols = 20 # cannot determine optimal number of columns cf.register_option( "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int ) cf.register_option( "large_repr", "truncate", pc_large_repr_doc, validator=is_one_of_factory(["truncate", "info"]), ) cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int) cf.register_option( "colheader_justify", "right", colheader_justify_doc, validator=is_text ) cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool) cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int) cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool) cf.register_option("expand_frame_repr", True, pc_expand_repr_doc) cf.register_option( "show_dimensions", "truncate", pc_show_dimensions_doc, validator=is_one_of_factory([True, False, "truncate"]), ) cf.register_option("chop_threshold", None, pc_chop_threshold_doc) cf.register_option("max_seq_items", 100, pc_max_seq_items) cf.register_option( "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int]) ) cf.register_option( "memory_usage", True, pc_memory_usage_doc, validator=is_one_of_factory([None, True, False, "deep"]), ) cf.register_option( "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "html.table_schema", False, pc_table_schema_doc, validator=is_bool, cb=table_schema_cb, ) cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int) cf.register_option( "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool ) cf.register_option( "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int ) with cf.config_prefix("mode"): cf.register_option("sim_interactive", False, tc_sim_interactive_doc) with cf.config_prefix("mode"): cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb) with cf.config_prefix("mode"): cf.register_option( "data_manager", # Get the default from an environment variable, if set, otherwise defaults # to 
"block". This environment variable can be set for testing. os.environ.get("PANDAS_DATA_MANAGER", "block"), data_manager_doc, validator=is_one_of_factory(["block", "array"]), ) with cf.config_prefix("mode"): cf.register_option( "copy_on_write", # Get the default from an environment variable, if set, otherwise defaults # to False. This environment variable can be set for testing. os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1", copy_on_write_doc, validator=is_bool, ) with cf.config_prefix("mode"): cf.register_option( "chained_assignment", "warn", chained_assignment, validator=is_one_of_factory([None, "warn", "raise"]), ) with cf.config_prefix("mode"): cf.register_option( "string_storage", "python", string_storage_doc, validator=is_one_of_factory(["python", "pyarrow"]), ) with cf.config_prefix("io.excel.xls"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)), validator=is_one_of_factory(_xls_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=is_one_of_factory(_xlsm_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=is_one_of_factory(_xlsx_options + ["auto"]), ) with cf.config_prefix("io.excel.ods"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=is_one_of_factory(_ods_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsb"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)), validator=is_one_of_factory(_xlsb_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=str, ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=str, ) with cf.config_prefix("io.excel.ods"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=str, ) with cf.config_prefix("io.parquet"): cf.register_option( "engine", "auto", parquet_engine_doc, validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), ) with cf.config_prefix("io.sql"): cf.register_option( "engine", "auto", sql_engine_doc, validator=is_one_of_factory(["auto", "sqlalchemy"]), ) with cf.config_prefix("plotting"): cf.register_option( "backend", defval="matplotlib", doc=plotting_backend_doc, validator=register_plotting_backend_cb, ) with cf.config_prefix("plotting.matplotlib"): cf.register_option( "register_converters", "auto", register_converter_doc, validator=is_one_of_factory(["auto", True, False]), cb=register_converter_cb, ) with cf.config_prefix("styler"): cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) cf.register_option( "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool ) cf.register_option( "render.repr", "html", styler_render_repr, validator=is_one_of_factory(["html", "latex"]), ) cf.register_option( "render.max_elements", 2**18, styler_max_elements, validator=is_nonnegative_int, ) cf.register_option( "render.max_rows", None, styler_max_rows, validator=is_nonnegative_int, ) cf.register_option( "render.max_columns", None, styler_max_columns, 
validator=is_nonnegative_int, ) cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) cf.register_option( "format.precision", 6, styler_precision, validator=is_nonnegative_int ) cf.register_option( "format.thousands", None, styler_thousands, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.na_rep", None, styler_na_rep, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.escape", None, styler_escape, validator=is_one_of_factory([None, "html", "latex"]), ) cf.register_option( "format.formatter", None, styler_formatter, validator=is_instance_factory([type(None), dict, Callable, str]), ) cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) cf.register_option( "latex.multirow_align", "c", styler_multirow_align, validator=is_one_of_factory(["c", "t", "b", "naive"]), ) val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] val_mca += ["naive-l", "naive-r"] cf.register_option( "latex.multicol_align", "r", styler_multicol_align, validator=is_one_of_factory(val_mca), ) cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) cf.register_option( "latex.environment", None, styler_environment, validator=is_instance_factory([type(None), str]), ) def use_numba_cb(key) -> None: from pandas.core.util import numba_ numba_.set_use_numba(cf.get_option(key))
null
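A sketch for the "compute.use_numba" option tied to use_numba_cb; it defaults to False, and enabling it is only meaningful when numba is importable (an assumption here, not checked by the snippet).

import pandas as pd

print(pd.get_option("compute.use_numba"))         # False by default
pd.set_option("compute.use_numba", True)          # callback calls numba_.set_use_numba(True)
pd.set_option("compute.use_numba", False)         # restore the default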
173,357
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) with cf.config_prefix("compute"): cf.register_option( "use_bottleneck", True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb, ) cf.register_option( "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) cf.register_option( "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb ) with cf.config_prefix("display"): cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) cf.register_option( "float_format", None, float_format_doc, validator=is_one_of_factory([None, is_callable]), ) cf.register_option( "max_info_rows", 1690785, pc_max_info_rows_doc, validator=is_instance_factory((int, type(None))), ) cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) cf.register_option( "min_rows", 10, pc_min_rows_doc, validator=is_instance_factory([type(None), int]), ) cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) cf.register_option( "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int, ) if is_terminal(): max_cols = 0 # automatically determine optimal number of columns else: max_cols = 20 # cannot determine optimal number of columns cf.register_option( "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int ) cf.register_option( "large_repr", "truncate", pc_large_repr_doc, validator=is_one_of_factory(["truncate", "info"]), ) cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int) cf.register_option( "colheader_justify", "right", colheader_justify_doc, validator=is_text ) cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool) cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int) cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool) cf.register_option("expand_frame_repr", True, pc_expand_repr_doc) cf.register_option( "show_dimensions", "truncate", pc_show_dimensions_doc, validator=is_one_of_factory([True, False, "truncate"]), ) cf.register_option("chop_threshold", None, pc_chop_threshold_doc) cf.register_option("max_seq_items", 100, pc_max_seq_items) cf.register_option( "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int]) ) cf.register_option( "memory_usage", True, pc_memory_usage_doc, validator=is_one_of_factory([None, True, False, "deep"]), ) cf.register_option( "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "html.table_schema", False, pc_table_schema_doc, validator=is_bool, cb=table_schema_cb, ) cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int) cf.register_option( "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool ) cf.register_option( "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int ) with cf.config_prefix("mode"): cf.register_option("sim_interactive", False, tc_sim_interactive_doc) with cf.config_prefix("mode"): cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb) with cf.config_prefix("mode"): cf.register_option( "data_manager", # Get the default from an environment variable, if set, otherwise defaults # to 
"block". This environment variable can be set for testing. os.environ.get("PANDAS_DATA_MANAGER", "block"), data_manager_doc, validator=is_one_of_factory(["block", "array"]), ) with cf.config_prefix("mode"): cf.register_option( "copy_on_write", # Get the default from an environment variable, if set, otherwise defaults # to False. This environment variable can be set for testing. os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1", copy_on_write_doc, validator=is_bool, ) with cf.config_prefix("mode"): cf.register_option( "chained_assignment", "warn", chained_assignment, validator=is_one_of_factory([None, "warn", "raise"]), ) with cf.config_prefix("mode"): cf.register_option( "string_storage", "python", string_storage_doc, validator=is_one_of_factory(["python", "pyarrow"]), ) with cf.config_prefix("io.excel.xls"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)), validator=is_one_of_factory(_xls_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=is_one_of_factory(_xlsm_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=is_one_of_factory(_xlsx_options + ["auto"]), ) with cf.config_prefix("io.excel.ods"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=is_one_of_factory(_ods_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsb"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)), validator=is_one_of_factory(_xlsb_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=str, ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=str, ) with cf.config_prefix("io.excel.ods"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=str, ) with cf.config_prefix("io.parquet"): cf.register_option( "engine", "auto", parquet_engine_doc, validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), ) with cf.config_prefix("io.sql"): cf.register_option( "engine", "auto", sql_engine_doc, validator=is_one_of_factory(["auto", "sqlalchemy"]), ) with cf.config_prefix("plotting"): cf.register_option( "backend", defval="matplotlib", doc=plotting_backend_doc, validator=register_plotting_backend_cb, ) with cf.config_prefix("plotting.matplotlib"): cf.register_option( "register_converters", "auto", register_converter_doc, validator=is_one_of_factory(["auto", True, False]), cb=register_converter_cb, ) with cf.config_prefix("styler"): cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) cf.register_option( "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool ) cf.register_option( "render.repr", "html", styler_render_repr, validator=is_one_of_factory(["html", "latex"]), ) cf.register_option( "render.max_elements", 2**18, styler_max_elements, validator=is_nonnegative_int, ) cf.register_option( "render.max_rows", None, styler_max_rows, validator=is_nonnegative_int, ) cf.register_option( "render.max_columns", None, styler_max_columns, 
validator=is_nonnegative_int, ) cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) cf.register_option( "format.precision", 6, styler_precision, validator=is_nonnegative_int ) cf.register_option( "format.thousands", None, styler_thousands, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.na_rep", None, styler_na_rep, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.escape", None, styler_escape, validator=is_one_of_factory([None, "html", "latex"]), ) cf.register_option( "format.formatter", None, styler_formatter, validator=is_instance_factory([type(None), dict, Callable, str]), ) cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) cf.register_option( "latex.multirow_align", "c", styler_multirow_align, validator=is_one_of_factory(["c", "t", "b", "naive"]), ) val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] val_mca += ["naive-l", "naive-r"] cf.register_option( "latex.multicol_align", "r", styler_multicol_align, validator=is_one_of_factory(val_mca), ) cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) cf.register_option( "latex.environment", None, styler_environment, validator=is_instance_factory([type(None), str]), ) def enable_data_resource_formatter(enable: bool) -> None: def table_schema_cb(key) -> None: from pandas.io.formats.printing import enable_data_resource_formatter enable_data_resource_formatter(cf.get_option(key))
null
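A sketch for "display.html.table_schema", whose callback table_schema_cb enables the Table Schema repr used by Jupyter front ends; in a plain script the call simply flips the flag.

import pandas as pd

pd.set_option("display.html.table_schema", True)
print(pd.get_option("display.html.table_schema")) # True
pd.set_option("display.html.table_schema", False) # restore the registered default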
173,358
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) The provided code snippet includes necessary dependencies for implementing the `is_terminal` function. Write a Python function `def is_terminal() -> bool` to solve the following problem: Detect if Python is running in a terminal. Returns True if Python is running in a terminal or False if not. Here is the function: def is_terminal() -> bool: """ Detect if Python is running in a terminal. Returns True if Python is running in a terminal or False if not. """ try: # error: Name 'get_ipython' is not defined ip = get_ipython() # type: ignore[name-defined] except NameError: # assume standard Python interpreter in a terminal return True else: if hasattr(ip, "kernel"): # IPython as a Jupyter kernel return False else: # IPython in a terminal return True
Detect if Python is running in a terminal. Returns True if Python is running in a terminal or False if not.
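A short sketch of the behaviour is_terminal drives in the registration code above: the "display.max_columns" default is 0 (auto-detect the terminal width) when it returns True and 20 under a Jupyter kernel, so the value printed below depends on where the interpreter runs.

import pandas as pd

print(pd.get_option("display.max_columns"))       # 0 in a terminal session, 20 in a notebook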
173,359
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) def _use_inf_as_na(key) -> None: """ Option change callback for na/inf behaviour. Choose which replacement for numpy.isnan / -numpy.isfinite is used. Parameters ---------- flag: bool True means treat None, NaN, INF, -INF as null (old way), False means None and NaN are null, but INF, -INF are not null (new way). Notes ----- This approach to setting global module values is discussed and approved here: * https://stackoverflow.com/questions/4859217/ programmatically-creating-variables-in-python/4859312#4859312 """ inf_as_na = get_option(key) globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na) if inf_as_na: globals()["nan_checker"] = lambda x: ~np.isfinite(x) globals()["INF_AS_NA"] = True else: globals()["nan_checker"] = np.isnan globals()["INF_AS_NA"] = False def use_inf_as_na_cb(key) -> None: from pandas.core.dtypes.missing import _use_inf_as_na _use_inf_as_na(key)
null
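A sketch showing what the "mode.use_inf_as_na" callback changes: with the option enabled, isna also treats +/-inf as missing (assumes only numpy and pandas).

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.inf, np.nan])
print(s.isna().tolist())                          # [False, False, True]
with pd.option_context("mode.use_inf_as_na", True):
    print(s.isna().tolist())                      # [False, True, True]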
173,360
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module def register_plotting_backend_cb(key) -> None: if key == "matplotlib": # We defer matplotlib validation, since it's the default return from pandas.plotting._core import _get_plot_backend _get_plot_backend(key)
null
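A sketch of the validation performed by register_plotting_backend_cb: "matplotlib" is accepted without importing anything, while any other backend name must resolve to a loadable plotting backend, otherwise setting the option raises ("not_a_real_backend" below is a hypothetical name).

import pandas as pd

print(pd.get_option("plotting.backend"))          # "matplotlib", the registered default
try:
    pd.set_option("plotting.backend", "not_a_real_backend")
except Exception as err:                          # the backend cannot be found or loaded
    print(type(err).__name__)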
173,361
from __future__ import annotations import os from typing import Callable import pandas._config.config as cf from pandas._config.config import ( is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text, ) with cf.config_prefix("compute"): cf.register_option( "use_bottleneck", True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb, ) cf.register_option( "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) cf.register_option( "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb ) with cf.config_prefix("display"): cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) cf.register_option( "float_format", None, float_format_doc, validator=is_one_of_factory([None, is_callable]), ) cf.register_option( "max_info_rows", 1690785, pc_max_info_rows_doc, validator=is_instance_factory((int, type(None))), ) cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) cf.register_option( "min_rows", 10, pc_min_rows_doc, validator=is_instance_factory([type(None), int]), ) cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) cf.register_option( "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int, ) if is_terminal(): max_cols = 0 # automatically determine optimal number of columns else: max_cols = 20 # cannot determine optimal number of columns cf.register_option( "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int ) cf.register_option( "large_repr", "truncate", pc_large_repr_doc, validator=is_one_of_factory(["truncate", "info"]), ) cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int) cf.register_option( "colheader_justify", "right", colheader_justify_doc, validator=is_text ) cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool) cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int) cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool) cf.register_option("expand_frame_repr", True, pc_expand_repr_doc) cf.register_option( "show_dimensions", "truncate", pc_show_dimensions_doc, validator=is_one_of_factory([True, False, "truncate"]), ) cf.register_option("chop_threshold", None, pc_chop_threshold_doc) cf.register_option("max_seq_items", 100, pc_max_seq_items) cf.register_option( "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int]) ) cf.register_option( "memory_usage", True, pc_memory_usage_doc, validator=is_one_of_factory([None, True, False, "deep"]), ) cf.register_option( "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool ) cf.register_option( "html.table_schema", False, pc_table_schema_doc, validator=is_bool, cb=table_schema_cb, ) cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int) cf.register_option( "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool ) cf.register_option( "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int ) with cf.config_prefix("mode"): cf.register_option("sim_interactive", False, tc_sim_interactive_doc) with cf.config_prefix("mode"): cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb) with cf.config_prefix("mode"): cf.register_option( "data_manager", # Get the default from an environment variable, if set, otherwise defaults # to 
"block". This environment variable can be set for testing. os.environ.get("PANDAS_DATA_MANAGER", "block"), data_manager_doc, validator=is_one_of_factory(["block", "array"]), ) with cf.config_prefix("mode"): cf.register_option( "copy_on_write", # Get the default from an environment variable, if set, otherwise defaults # to False. This environment variable can be set for testing. os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1", copy_on_write_doc, validator=is_bool, ) with cf.config_prefix("mode"): cf.register_option( "chained_assignment", "warn", chained_assignment, validator=is_one_of_factory([None, "warn", "raise"]), ) with cf.config_prefix("mode"): cf.register_option( "string_storage", "python", string_storage_doc, validator=is_one_of_factory(["python", "pyarrow"]), ) with cf.config_prefix("io.excel.xls"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)), validator=is_one_of_factory(_xls_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=is_one_of_factory(_xlsm_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=is_one_of_factory(_xlsx_options + ["auto"]), ) with cf.config_prefix("io.excel.ods"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=is_one_of_factory(_ods_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsb"): cf.register_option( "reader", "auto", reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)), validator=is_one_of_factory(_xlsb_options + ["auto"]), ) with cf.config_prefix("io.excel.xlsm"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), validator=str, ) with cf.config_prefix("io.excel.xlsx"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), validator=str, ) with cf.config_prefix("io.excel.ods"): cf.register_option( "writer", "auto", writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)), validator=str, ) with cf.config_prefix("io.parquet"): cf.register_option( "engine", "auto", parquet_engine_doc, validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), ) with cf.config_prefix("io.sql"): cf.register_option( "engine", "auto", sql_engine_doc, validator=is_one_of_factory(["auto", "sqlalchemy"]), ) with cf.config_prefix("plotting"): cf.register_option( "backend", defval="matplotlib", doc=plotting_backend_doc, validator=register_plotting_backend_cb, ) with cf.config_prefix("plotting.matplotlib"): cf.register_option( "register_converters", "auto", register_converter_doc, validator=is_one_of_factory(["auto", True, False]), cb=register_converter_cb, ) with cf.config_prefix("styler"): cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) cf.register_option( "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool ) cf.register_option( "render.repr", "html", styler_render_repr, validator=is_one_of_factory(["html", "latex"]), ) cf.register_option( "render.max_elements", 2**18, styler_max_elements, validator=is_nonnegative_int, ) cf.register_option( "render.max_rows", None, styler_max_rows, validator=is_nonnegative_int, ) cf.register_option( "render.max_columns", None, styler_max_columns, 
validator=is_nonnegative_int, ) cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) cf.register_option( "format.precision", 6, styler_precision, validator=is_nonnegative_int ) cf.register_option( "format.thousands", None, styler_thousands, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.na_rep", None, styler_na_rep, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.escape", None, styler_escape, validator=is_one_of_factory([None, "html", "latex"]), ) cf.register_option( "format.formatter", None, styler_formatter, validator=is_instance_factory([type(None), dict, Callable, str]), ) cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) cf.register_option( "latex.multirow_align", "c", styler_multirow_align, validator=is_one_of_factory(["c", "t", "b", "naive"]), ) val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] val_mca += ["naive-l", "naive-r"] cf.register_option( "latex.multicol_align", "r", styler_multicol_align, validator=is_one_of_factory(val_mca), ) cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) cf.register_option( "latex.environment", None, styler_environment, validator=is_instance_factory([type(None), str]), ) def register_converter_cb(key) -> None: from pandas.plotting import ( deregister_matplotlib_converters, register_matplotlib_converters, ) if cf.get_option(key): register_matplotlib_converters() else: deregister_matplotlib_converters()
null
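A sketch for "plotting.matplotlib.register_converters": register_converter_cb registers or deregisters the matplotlib date converters depending on the value, so running this assumes matplotlib is installed.

import pandas as pd

pd.set_option("plotting.matplotlib.register_converters", True)   # force registration
pd.set_option("plotting.matplotlib.register_converters", "auto") # back to the default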
173,362
from __future__ import annotations from datetime import ( datetime, time, ) import numpy as np from pandas._libs.lib import is_list_like from pandas._typing import DateTimeErrorChoices from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) from pandas.core.dtypes.missing import notna _time_formats = [ "%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p", ] def _guess_time_format_for_array(arr): # Try to guess the format based on the first non-NaN element non_nan_elements = notna(arr).nonzero()[0] if len(non_nan_elements): element = arr[non_nan_elements[0]] for time_format in _time_formats: try: datetime.strptime(element, time_format) return time_format except ValueError: pass return None from time import struct_time class time: min: ClassVar[time] max: ClassVar[time] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __init__( self, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> None: ... else: def __init__( self, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ... ) -> None: ... def hour(self) -> int: ... def minute(self) -> int: ... def second(self) -> int: ... def microsecond(self) -> int: ... def tzinfo(self) -> Optional[_tzinfo]: ... if sys.version_info >= (3, 6): def fold(self) -> int: ... def __le__(self, other: time) -> bool: ... def __lt__(self, other: time) -> bool: ... def __ge__(self, other: time) -> bool: ... def __gt__(self, other: time) -> bool: ... def __hash__(self) -> int: ... if sys.version_info >= (3, 6): def isoformat(self, timespec: str = ...) -> str: ... else: def isoformat(self) -> str: ... if sys.version_info >= (3, 7): def fromisoformat(cls: Type[_S], time_string: str) -> _S: ... def strftime(self, fmt: _Text) -> str: ... if sys.version_info >= (3,): def __format__(self, fmt: str) -> str: ... else: def __format__(self, fmt: AnyStr) -> AnyStr: ... def utcoffset(self) -> Optional[timedelta]: ... def tzname(self) -> Optional[str]: ... def dst(self) -> Optional[timedelta]: ... if sys.version_info >= (3, 6): def replace( self, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> time: ... else: def replace( self, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ... ) -> time: ... class datetime(date): min: ClassVar[datetime] max: ClassVar[datetime] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __new__( cls: Type[_S], year: int, month: int, day: int, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> _S: ... else: def __new__( cls: Type[_S], year: int, month: int, day: int, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., ) -> _S: ... def year(self) -> int: ... def month(self) -> int: ... def day(self) -> int: ... def hour(self) -> int: ... def minute(self) -> int: ... def second(self) -> int: ... def microsecond(self) -> int: ... def tzinfo(self) -> Optional[_tzinfo]: ... if sys.version_info >= (3, 6): def fold(self) -> int: ... def fromtimestamp(cls: Type[_S], t: float, tz: Optional[_tzinfo] = ...) -> _S: ... def utcfromtimestamp(cls: Type[_S], t: float) -> _S: ... def today(cls: Type[_S]) -> _S: ... def fromordinal(cls: Type[_S], n: int) -> _S: ... 
if sys.version_info >= (3, 8): def now(cls: Type[_S], tz: Optional[_tzinfo] = ...) -> _S: ... else: def now(cls: Type[_S], tz: None = ...) -> _S: ... def now(cls, tz: _tzinfo) -> datetime: ... def utcnow(cls: Type[_S]) -> _S: ... if sys.version_info >= (3, 6): def combine(cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ...) -> datetime: ... else: def combine(cls, date: _date, time: _time) -> datetime: ... if sys.version_info >= (3, 7): def fromisoformat(cls: Type[_S], date_string: str) -> _S: ... def strftime(self, fmt: _Text) -> str: ... if sys.version_info >= (3,): def __format__(self, fmt: str) -> str: ... else: def __format__(self, fmt: AnyStr) -> AnyStr: ... def toordinal(self) -> int: ... def timetuple(self) -> struct_time: ... if sys.version_info >= (3, 3): def timestamp(self) -> float: ... def utctimetuple(self) -> struct_time: ... def date(self) -> _date: ... def time(self) -> _time: ... def timetz(self) -> _time: ... if sys.version_info >= (3, 6): def replace( self, year: int = ..., month: int = ..., day: int = ..., hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> datetime: ... else: def replace( self, year: int = ..., month: int = ..., day: int = ..., hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., ) -> datetime: ... if sys.version_info >= (3, 8): def astimezone(self: _S, tz: Optional[_tzinfo] = ...) -> _S: ... elif sys.version_info >= (3, 3): def astimezone(self, tz: Optional[_tzinfo] = ...) -> datetime: ... else: def astimezone(self, tz: _tzinfo) -> datetime: ... def ctime(self) -> str: ... if sys.version_info >= (3, 6): def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... else: def isoformat(self, sep: str = ...) -> str: ... def strptime(cls, date_string: _Text, format: _Text) -> datetime: ... def utcoffset(self) -> Optional[timedelta]: ... def tzname(self) -> Optional[str]: ... def dst(self) -> Optional[timedelta]: ... def __le__(self, other: datetime) -> bool: ... # type: ignore def __lt__(self, other: datetime) -> bool: ... # type: ignore def __ge__(self, other: datetime) -> bool: ... # type: ignore def __gt__(self, other: datetime) -> bool: ... # type: ignore if sys.version_info >= (3, 8): def __add__(self: _S, other: timedelta) -> _S: ... def __radd__(self: _S, other: timedelta) -> _S: ... else: def __add__(self, other: timedelta) -> datetime: ... def __radd__(self, other: timedelta) -> datetime: ... def __sub__(self, other: datetime) -> timedelta: ... def __sub__(self, other: timedelta) -> datetime: ... def __hash__(self) -> int: ... def weekday(self) -> int: ... def isoweekday(self) -> int: ... def isocalendar(self) -> Tuple[int, int, int]: ... DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `to_time` function. 
Write a Python function `def to_time( arg, format=None, infer_time_format: bool = False, errors: DateTimeErrorChoices = "raise", )` to solve the following problem: Parse time strings to time objects using fixed strptime formats ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p") Use infer_time_format if all the strings are in the same format to speed up conversion. Parameters ---------- arg : string in time format, datetime.time, list, tuple, 1-d array, Series format : str, default None Format used to convert arg into a time object. If None, fixed formats are used. infer_time_format: bool, default False Infer the time format based on the first non-NaN element. If all strings are in the same format, this will speed up conversion. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as None - If 'ignore', then invalid parsing will return the input Returns ------- datetime.time Here is the function: def to_time( arg, format=None, infer_time_format: bool = False, errors: DateTimeErrorChoices = "raise", ): """ Parse time strings to time objects using fixed strptime formats ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p") Use infer_time_format if all the strings are in the same format to speed up conversion. Parameters ---------- arg : string in time format, datetime.time, list, tuple, 1-d array, Series format : str, default None Format used to convert arg into a time object. If None, fixed formats are used. infer_time_format: bool, default False Infer the time format based on the first non-NaN element. If all strings are in the same format, this will speed up conversion. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as None - If 'ignore', then invalid parsing will return the input Returns ------- datetime.time """ def _convert_listlike(arg, format): if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype="O") elif getattr(arg, "ndim", 1) > 1: raise TypeError( "arg must be a string, datetime, list, tuple, 1-d array, or Series" ) arg = np.asarray(arg, dtype="O") if infer_time_format and format is None: format = _guess_time_format_for_array(arg) times: list[time | None] = [] if format is not None: for element in arg: try: times.append(datetime.strptime(element, format).time()) except (ValueError, TypeError) as err: if errors == "raise": msg = ( f"Cannot convert {element} to a time with given " f"format {format}" ) raise ValueError(msg) from err if errors == "ignore": return arg else: times.append(None) else: formats = _time_formats[:] format_found = False for element in arg: time_object = None try: time_object = time.fromisoformat(element) except (ValueError, TypeError): for time_format in formats: try: time_object = datetime.strptime(element, time_format).time() if not format_found: # Put the found format in front fmt = formats.pop(formats.index(time_format)) formats.insert(0, fmt) format_found = True break except (ValueError, TypeError): continue if time_object is not None: times.append(time_object) elif errors == "raise": raise ValueError(f"Cannot convert arg {arg} to a time") elif errors == "ignore": return arg else: times.append(None) return times if arg is None: return arg elif isinstance(arg, time): return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, format) 
return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndex): return _convert_listlike(arg, format) elif is_list_like(arg): return _convert_listlike(arg, format) return _convert_listlike(np.array([arg]), format)[0]
Parse time strings to time objects using fixed strptime formats ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p") Use infer_time_format if all the strings are in the same format to speed up conversion. Parameters ---------- arg : string in time format, datetime.time, list, tuple, 1-d array, Series format : str, default None Format used to convert arg into a time object. If None, fixed formats are used. infer_time_format: bool, default False Infer the time format based on the first non-NaN element. If all strings are in the same format, this will speed up conversion. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as None - If 'ignore', then invalid parsing will return the input Returns ------- datetime.time
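A usage sketch for to_time; it is an internal helper, and the import path pandas.core.tools.times used below is an assumption based on this module layout rather than documented public API.

from pandas.core.tools.times import to_time

print(to_time("14:30"))                           # datetime.time(14, 30)
print(to_time(["2:15:00pm", "09:05"]))            # [datetime.time(14, 15), datetime.time(9, 5)]
print(to_time("not a time", errors="coerce"))     # None instead of raising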
173,363
from __future__ import annotations from datetime import timedelta from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( NaT, NaTType, ) from pandas._libs.tslibs.timedeltas import ( Timedelta, parse_timedelta_unit, ) from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) from pandas.core.arrays.timedeltas import sequence_to_td64ns class timedelta(SupportsAbs[timedelta]): min: ClassVar[timedelta] max: ClassVar[timedelta] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., *, fold: int = ..., ) -> None: ... else: def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., ) -> None: ... def days(self) -> int: ... def seconds(self) -> int: ... def microseconds(self) -> int: ... def total_seconds(self) -> float: ... def __add__(self, other: timedelta) -> timedelta: ... def __radd__(self, other: timedelta) -> timedelta: ... def __sub__(self, other: timedelta) -> timedelta: ... def __rsub__(self, other: timedelta) -> timedelta: ... def __neg__(self) -> timedelta: ... def __pos__(self) -> timedelta: ... def __abs__(self) -> timedelta: ... def __mul__(self, other: float) -> timedelta: ... def __rmul__(self, other: float) -> timedelta: ... def __floordiv__(self, other: timedelta) -> int: ... def __floordiv__(self, other: int) -> timedelta: ... if sys.version_info >= (3,): def __truediv__(self, other: timedelta) -> float: ... def __truediv__(self, other: float) -> timedelta: ... def __mod__(self, other: timedelta) -> timedelta: ... def __divmod__(self, other: timedelta) -> Tuple[int, timedelta]: ... else: def __div__(self, other: timedelta) -> float: ... def __div__(self, other: float) -> timedelta: ... def __le__(self, other: timedelta) -> bool: ... def __lt__(self, other: timedelta) -> bool: ... def __ge__(self, other: timedelta) -> bool: ... def __gt__(self, other: timedelta) -> bool: ... def __hash__(self) -> int: ... DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] def to_timedelta( arg: str | float | timedelta, unit: UnitChoices | None = ..., errors: DateTimeErrorChoices = ..., ) -> Timedelta: ...
null
173,364
from __future__ import annotations from datetime import timedelta from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( NaT, NaTType, ) from pandas._libs.tslibs.timedeltas import ( Timedelta, parse_timedelta_unit, ) from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) from pandas.core.arrays.timedeltas import sequence_to_td64ns DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] def to_timedelta( arg: Series, unit: UnitChoices | None = ..., errors: DateTimeErrorChoices = ..., ) -> Series: ...
null
173,365
from __future__ import annotations from datetime import timedelta from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( NaT, NaTType, ) from pandas._libs.tslibs.timedeltas import ( Timedelta, parse_timedelta_unit, ) from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) from pandas.core.arrays.timedeltas import sequence_to_td64ns ArrayLike = Union["ExtensionArray", np.ndarray] DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] def to_timedelta( arg: list | tuple | range | ArrayLike | Index, unit: UnitChoices | None = ..., errors: DateTimeErrorChoices = ..., ) -> TimedeltaIndex: ...
null
173,366
from __future__ import annotations from datetime import timedelta from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( NaT, NaTType, ) from pandas._libs.tslibs.timedeltas import ( Timedelta, parse_timedelta_unit, ) from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) from pandas.core.arrays.timedeltas import sequence_to_td64ns def _coerce_scalar_to_timedelta_type( r, unit: UnitChoices | None = "ns", errors: DateTimeErrorChoices = "raise" ): """Convert string 'r' to a timedelta object.""" result: Timedelta | NaTType try: result = Timedelta(r, unit) except ValueError: if errors == "raise": raise if errors == "ignore": return r # coerce result = NaT return result def _convert_listlike( arg, unit=None, errors: DateTimeErrorChoices = "raise", name=None ): """Convert a list of objects to a timedelta index object.""" if isinstance(arg, (list, tuple)) or not hasattr(arg, "dtype"): # This is needed only to ensure that in the case where we end up # returning arg (errors == "ignore"), and where the input is a # generator, we return a useful list-like instead of a # used-up generator if not hasattr(arg, "__array__"): arg = list(arg) arg = np.array(arg, dtype=object) try: td64arr = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0] except ValueError: if errors == "ignore": return arg else: # This else-block accounts for the cases when errors='raise' # and errors='coerce'. If errors == 'raise', these errors # should be raised. If errors == 'coerce', we shouldn't # expect any errors to be raised, since all parsing errors # cause coercion to pd.NaT. However, if an error / bug is # introduced that causes an Exception to be raised, we would # like to surface it. raise from pandas import TimedeltaIndex value = TimedeltaIndex(td64arr, unit="ns", name=name) return value class timedelta(SupportsAbs[timedelta]): min: ClassVar[timedelta] max: ClassVar[timedelta] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., *, fold: int = ..., ) -> None: ... else: def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., ) -> None: ... def days(self) -> int: ... def seconds(self) -> int: ... def microseconds(self) -> int: ... def total_seconds(self) -> float: ... def __add__(self, other: timedelta) -> timedelta: ... def __radd__(self, other: timedelta) -> timedelta: ... def __sub__(self, other: timedelta) -> timedelta: ... def __rsub__(self, other: timedelta) -> timedelta: ... def __neg__(self) -> timedelta: ... def __pos__(self) -> timedelta: ... def __abs__(self) -> timedelta: ... def __mul__(self, other: float) -> timedelta: ... def __rmul__(self, other: float) -> timedelta: ... def __floordiv__(self, other: timedelta) -> int: ... def __floordiv__(self, other: int) -> timedelta: ... if sys.version_info >= (3,): def __truediv__(self, other: timedelta) -> float: ... def __truediv__(self, other: float) -> timedelta: ... def __mod__(self, other: timedelta) -> timedelta: ... def __divmod__(self, other: timedelta) -> Tuple[int, timedelta]: ... else: def __div__(self, other: timedelta) -> float: ... def __div__(self, other: float) -> timedelta: ... 
def __le__(self, other: timedelta) -> bool: ... def __lt__(self, other: timedelta) -> bool: ... def __ge__(self, other: timedelta) -> bool: ... def __gt__(self, other: timedelta) -> bool: ... def __hash__(self) -> int: ... ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) ArrayLike = Union["ExtensionArray", np.ndarray] DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] The provided code snippet includes necessary dependencies for implementing the `to_timedelta` function. Write a Python function `def to_timedelta( arg: str | int | float | timedelta | list | tuple | range | ArrayLike | Index | Series, unit: UnitChoices | None = None, errors: DateTimeErrorChoices = "raise", ) -> Timedelta | TimedeltaIndex | Series` to solve the following problem: Convert argument to timedelta. Timedeltas are absolute differences in times, expressed in difference units (e.g. days, hours, minutes, seconds). This method converts an argument from a recognized timedelta format / value into a Timedelta type. Parameters ---------- arg : str, timedelta, list-like or Series The data to be converted to timedelta. .. versionchanged:: 2.0 Strings with units 'M', 'Y' and 'y' do not represent unambiguous timedelta values and will raise an exception. unit : str, optional Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``. Possible values: * 'W' * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L' * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U' * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N' .. versionchanged:: 1.1.0 Must not be specified when `arg` context strings and ``errors="raise"``. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. - If 'ignore', then invalid parsing will return the input. Returns ------- timedelta If parsing succeeded. Return type depends on input: - list-like: TimedeltaIndex of timedelta64 dtype - Series: Series of timedelta64 dtype - scalar: Timedelta See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. convert_dtypes : Convert dtypes. Notes ----- If the precision is higher than nanoseconds, the precision of the duration is truncated to nanoseconds for string inputs. 
Examples -------- Parsing a single string to a Timedelta: >>> pd.to_timedelta('1 days 06:05:01.00003') Timedelta('1 days 06:05:01.000030') >>> pd.to_timedelta('15.5us') Timedelta('0 days 00:00:00.000015500') Parsing a list or array of strings: >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT], dtype='timedelta64[ns]', freq=None) Converting numbers by specifying the `unit` keyword argument: >>> pd.to_timedelta(np.arange(5), unit='s') TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03', '0 days 00:00:04'], dtype='timedelta64[ns]', freq=None) >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) Here is the function: def to_timedelta( arg: str | int | float | timedelta | list | tuple | range | ArrayLike | Index | Series, unit: UnitChoices | None = None, errors: DateTimeErrorChoices = "raise", ) -> Timedelta | TimedeltaIndex | Series: """ Convert argument to timedelta. Timedeltas are absolute differences in times, expressed in difference units (e.g. days, hours, minutes, seconds). This method converts an argument from a recognized timedelta format / value into a Timedelta type. Parameters ---------- arg : str, timedelta, list-like or Series The data to be converted to timedelta. .. versionchanged:: 2.0 Strings with units 'M', 'Y' and 'y' do not represent unambiguous timedelta values and will raise an exception. unit : str, optional Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``. Possible values: * 'W' * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L' * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U' * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N' .. versionchanged:: 1.1.0 Must not be specified when `arg` context strings and ``errors="raise"``. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. - If 'ignore', then invalid parsing will return the input. Returns ------- timedelta If parsing succeeded. Return type depends on input: - list-like: TimedeltaIndex of timedelta64 dtype - Series: Series of timedelta64 dtype - scalar: Timedelta See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. convert_dtypes : Convert dtypes. Notes ----- If the precision is higher than nanoseconds, the precision of the duration is truncated to nanoseconds for string inputs. 
Examples -------- Parsing a single string to a Timedelta: >>> pd.to_timedelta('1 days 06:05:01.00003') Timedelta('1 days 06:05:01.000030') >>> pd.to_timedelta('15.5us') Timedelta('0 days 00:00:00.000015500') Parsing a list or array of strings: >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT], dtype='timedelta64[ns]', freq=None) Converting numbers by specifying the `unit` keyword argument: >>> pd.to_timedelta(np.arange(5), unit='s') TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03', '0 days 00:00:04'], dtype='timedelta64[ns]', freq=None) >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) """ if unit is not None: unit = parse_timedelta_unit(unit) if errors not in ("ignore", "raise", "coerce"): raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'.") if unit in {"Y", "y", "M"}: raise ValueError( "Units 'M', 'Y', and 'y' are no longer supported, as they do not " "represent unambiguous timedelta values durations." ) if arg is None: return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, unit=unit, errors=errors) return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndex): return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name) elif isinstance(arg, np.ndarray) and arg.ndim == 0: # extract array scalar and process below # error: Incompatible types in assignment (expression has type "object", # variable has type "Union[str, int, float, timedelta, List[Any], # Tuple[Any, ...], Union[Union[ExtensionArray, ndarray[Any, Any]], Index, # Series]]") [assignment] arg = lib.item_from_zerodim(arg) # type: ignore[assignment] elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1: return _convert_listlike(arg, unit=unit, errors=errors) elif getattr(arg, "ndim", 1) > 1: raise TypeError( "arg must be a string, timedelta, list, tuple, 1-d array, or Series" ) if isinstance(arg, str) and unit is not None: raise ValueError("unit must not be specified if the input is/contains a str") # ...so it must be a scalar value. Return scalar. return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)
Convert argument to timedelta. Timedeltas are absolute differences in times, expressed in difference units (e.g. days, hours, minutes, seconds). This method converts an argument from a recognized timedelta format / value into a Timedelta type. Parameters ---------- arg : str, timedelta, list-like or Series The data to be converted to timedelta. .. versionchanged:: 2.0 Strings with units 'M', 'Y' and 'y' do not represent unambiguous timedelta values and will raise an exception. unit : str, optional Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``. Possible values: * 'W' * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L' * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U' * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N' .. versionchanged:: 1.1.0 Must not be specified when `arg` context strings and ``errors="raise"``. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. - If 'ignore', then invalid parsing will return the input. Returns ------- timedelta If parsing succeeded. Return type depends on input: - list-like: TimedeltaIndex of timedelta64 dtype - Series: Series of timedelta64 dtype - scalar: Timedelta See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. convert_dtypes : Convert dtypes. Notes ----- If the precision is higher than nanoseconds, the precision of the duration is truncated to nanoseconds for string inputs. Examples -------- Parsing a single string to a Timedelta: >>> pd.to_timedelta('1 days 06:05:01.00003') Timedelta('1 days 06:05:01.000030') >>> pd.to_timedelta('15.5us') Timedelta('0 days 00:00:00.000015500') Parsing a list or array of strings: >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT], dtype='timedelta64[ns]', freq=None) Converting numbers by specifying the `unit` keyword argument: >>> pd.to_timedelta(np.arange(5), unit='s') TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03', '0 days 00:00:04'], dtype='timedelta64[ns]', freq=None) >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)
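Taken together, the overloads and the implementation above say that the return type follows the input type. A short usage sketch against the public pandas API:

import numpy as np
import pandas as pd

print(pd.to_timedelta("1 days 06:05:01.00003"))          # scalar -> Timedelta
print(pd.to_timedelta(np.arange(3), unit="s"))           # array-like -> TimedeltaIndex
print(pd.to_timedelta(pd.Series(["1 day", "2 days"])))   # Series -> Series of timedelta64[ns]

# Invalid entries follow the `errors` policy; with "coerce" they become NaT.
print(pd.to_timedelta(["1 day", "oops"], errors="coerce"))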
173,367
from __future__ import annotations from collections import abc from datetime import datetime from functools import partial from itertools import islice from typing import ( TYPE_CHECKING, Callable, Hashable, List, Tuple, TypedDict, Union, cast, overload, ) import warnings import numpy as np from pandas._libs import ( lib, tslib, ) from pandas._libs.tslibs import ( OutOfBoundsDatetime, Timedelta, Timestamp, astype_overflowsafe, get_unit_from_dtype, iNaT, is_supported_unit, nat_strings, parsing, timezones as libtimezones, ) from pandas._libs.tslibs.conversion import precision_from_unit from pandas._libs.tslibs.parsing import ( DateParseError, guess_datetime_format, ) from pandas._libs.tslibs.strptime import array_strptime from pandas._typing import ( AnyArrayLike, ArrayLike, DateTimeErrorChoices, npt, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_datetime64_dtype, is_datetime64tz_dtype, is_float, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_scalar, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import notna from pandas.arrays import ( DatetimeArray, IntegerArray, PandasArray, ) from pandas.core import algorithms from pandas.core.algorithms import unique from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.datetimes import ( maybe_convert_dtype, objects_to_datetime64ns, tz_to_dtype, ) from pandas.core.construction import extract_array from pandas.core.indexes.base import Index from pandas.core.indexes.datetimes import DatetimeIndex def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. 
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res ) The provided code snippet includes necessary dependencies for implementing the `_attempt_YYYYMMDD` function. Write a Python function `def _attempt_YYYYMMDD(arg: npt.NDArray[np.object_], errors: str) -> np.ndarray | None` to solve the following problem: try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, arg is a passed in as an object dtype, but could really be ints/strings with nan-like/or floats (e.g. with nan) Parameters ---------- arg : np.ndarray[object] errors : {'raise','ignore','coerce'} Here is the function: def _attempt_YYYYMMDD(arg: npt.NDArray[np.object_], errors: str) -> np.ndarray | None: """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, arg is a passed in as an object dtype, but could really be ints/strings with nan-like/or floats (e.g. with nan) Parameters ---------- arg : np.ndarray[object] errors : {'raise','ignore','coerce'} """ def calc(carg): # calculate the actual result carg = carg.astype(object, copy=False) parsed = parsing.try_parse_year_month_day( carg / 10000, carg / 100 % 100, carg % 100 ) return tslib.array_to_datetime(parsed, errors=errors)[0] def calc_with_mask(carg, mask): result = np.empty(carg.shape, dtype="M8[ns]") iresult = result.view("i8") iresult[~mask] = iNaT masked_result = calc(carg[mask].astype(np.float64).astype(np.int64)) result[mask] = masked_result.astype("M8[ns]") return result # try intlike / strings that are ints try: return calc(arg.astype(np.int64)) except (ValueError, OverflowError, TypeError): pass # a float with actual np.nan try: carg = arg.astype(np.float64) return calc_with_mask(carg, notna(carg)) except (ValueError, OverflowError, TypeError): pass # string with NaN-like try: # error: Argument 2 to "isin" has incompatible type "List[Any]"; expected # "Union[Union[ExtensionArray, ndarray], Index, Series]" mask = ~algorithms.isin(arg, list(nat_strings)) # type: ignore[arg-type] return calc_with_mask(arg, mask) except (ValueError, OverflowError, TypeError): pass return None
try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, arg is passed in as an object dtype, but could really be ints/strings with nan-like or floats (e.g. with nan) Parameters ---------- arg : np.ndarray[object] errors : {'raise','ignore','coerce'}
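The `calc` closure above recovers year, month and day from integer-like YYYYMMDD values with plain arithmetic. The same decomposition in isolation, using integer division for clarity:

import numpy as np

carg = np.array([20230415, 19991231], dtype=np.int64)
years = carg // 10000
months = carg // 100 % 100
days = carg % 100
print(years, months, days)   # [2023 1999] [ 4 12] [15 31]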
173,368
from __future__ import annotations import itertools from typing import ( TYPE_CHECKING, Hashable, Iterable, Iterator, cast, ) import numpy as np from pandas._libs import lib from pandas._libs.hashing import hash_object_array from pandas._typing import ( ArrayLike, npt, ) from pandas.core.dtypes.common import ( is_categorical_dtype, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ) _default_hash_key = "0123456789123456" def combine_hash_arrays( arrays: Iterator[np.ndarray], num_items: int ) -> npt.NDArray[np.uint64]: """ Parameters ---------- arrays : Iterator[np.ndarray] num_items : int Returns ------- np.ndarray[uint64] Should be the same as CPython's tupleobject.c """ try: first = next(arrays) except StopIteration: return np.array([], dtype=np.uint64) arrays = itertools.chain([first], arrays) mult = np.uint64(1000003) out = np.zeros_like(first) + np.uint64(0x345678) last_i = 0 for i, a in enumerate(arrays): inverse_i = num_items - i out ^= a out *= mult mult += np.uint64(82520 + inverse_i + inverse_i) last_i = i assert last_i + 1 == num_items, "Fed in wrong num_items" out += np.uint64(97531) return out def hash_tuples( vals: MultiIndex | Iterable[tuple[Hashable, ...]], encoding: str = "utf8", hash_key: str = _default_hash_key, ) -> npt.NDArray[np.uint64]: """ Hash an MultiIndex / listlike-of-tuples efficiently. Parameters ---------- vals : MultiIndex or listlike-of-tuples encoding : str, default 'utf8' hash_key : str, default _default_hash_key Returns ------- ndarray[np.uint64] of hashed values """ if not is_list_like(vals): raise TypeError("must be convertible to a list-of-tuples") from pandas import ( Categorical, MultiIndex, ) if not isinstance(vals, ABCMultiIndex): mi = MultiIndex.from_tuples(vals) else: mi = vals # create a list-of-Categoricals cat_vals = [ Categorical(mi.codes[level], mi.levels[level], ordered=False, fastpath=True) for level in range(mi.nlevels) ] # hash the list-of-ndarrays hashes = ( _hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in cat_vals ) h = combine_hash_arrays(hashes, len(cat_vals)) return h def hash_array( vals: ArrayLike, encoding: str = "utf8", hash_key: str = _default_hash_key, categorize: bool = True, ) -> npt.NDArray[np.uint64]: """ Given a 1d array, return an array of deterministic integers. Parameters ---------- vals : ndarray or ExtensionArray encoding : str, default 'utf8' Encoding for data & key when strings. hash_key : str, default _default_hash_key Hash_key for string key to encode. categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. Returns ------- ndarray[np.uint64, ndim=1] Hashed values, same length as the vals. """ if not hasattr(vals, "dtype"): raise TypeError("must pass a ndarray-like") dtype = vals.dtype # For categoricals, we hash the categories, then remap the codes to the # hash values. (This check is above the complex check so that we don't ask # numpy if categorical is a subdtype of complex, as it will choke). if is_categorical_dtype(dtype): vals = cast("Categorical", vals) return _hash_categorical(vals, encoding, hash_key) elif isinstance(vals, ABCExtensionArray): vals, _ = vals._values_for_factorize() elif not isinstance(vals, np.ndarray): # GH#42003 raise TypeError( "hash_array requires np.ndarray or ExtensionArray, not " f"{type(vals).__name__}. Use hash_pandas_object instead." 
) return _hash_ndarray(vals, encoding, hash_key, categorize) ABCMultiIndex = cast( "Type[MultiIndex]", create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)), ) ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) The provided code snippet includes necessary dependencies for implementing the `hash_pandas_object` function. Write a Python function `def hash_pandas_object( obj: Index | DataFrame | Series, index: bool = True, encoding: str = "utf8", hash_key: str | None = _default_hash_key, categorize: bool = True, ) -> Series` to solve the following problem: Return a data hash of the Index/Series/DataFrame. Parameters ---------- obj : Index, Series, or DataFrame index : bool, default True Include the index in the hash (if Series/DataFrame). encoding : str, default 'utf8' Encoding for data & key when strings. hash_key : str, default _default_hash_key Hash_key for string key to encode. categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. Returns ------- Series of uint64, same length as the object Here is the function: def hash_pandas_object( obj: Index | DataFrame | Series, index: bool = True, encoding: str = "utf8", hash_key: str | None = _default_hash_key, categorize: bool = True, ) -> Series: """ Return a data hash of the Index/Series/DataFrame. Parameters ---------- obj : Index, Series, or DataFrame index : bool, default True Include the index in the hash (if Series/DataFrame). encoding : str, default 'utf8' Encoding for data & key when strings. hash_key : str, default _default_hash_key Hash_key for string key to encode. categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. 
Returns ------- Series of uint64, same length as the object """ from pandas import Series if hash_key is None: hash_key = _default_hash_key if isinstance(obj, ABCMultiIndex): return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False) elif isinstance(obj, ABCIndex): h = hash_array(obj._values, encoding, hash_key, categorize).astype( "uint64", copy=False ) ser = Series(h, index=obj, dtype="uint64", copy=False) elif isinstance(obj, ABCSeries): h = hash_array(obj._values, encoding, hash_key, categorize).astype( "uint64", copy=False ) if index: index_iter = ( hash_pandas_object( obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize, )._values for _ in [None] ) arrays = itertools.chain([h], index_iter) h = combine_hash_arrays(arrays, 2) ser = Series(h, index=obj.index, dtype="uint64", copy=False) elif isinstance(obj, ABCDataFrame): hashes = ( hash_array(series._values, encoding, hash_key, categorize) for _, series in obj.items() ) num_items = len(obj.columns) if index: index_hash_generator = ( hash_pandas_object( obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize, )._values for _ in [None] ) num_items += 1 # keep `hashes` specifically a generator to keep mypy happy _hashes = itertools.chain(hashes, index_hash_generator) hashes = (x for x in _hashes) h = combine_hash_arrays(hashes, num_items) ser = Series(h, index=obj.index, dtype="uint64", copy=False) else: raise TypeError(f"Unexpected type for hashing {type(obj)}") return ser
Return a data hash of the Index/Series/DataFrame. Parameters ---------- obj : Index, Series, or DataFrame index : bool, default True Include the index in the hash (if Series/DataFrame). encoding : str, default 'utf8' Encoding for data & key when strings. hash_key : str, default _default_hash_key Hash_key for string key to encode. categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. Returns ------- Series of uint64, same length as the object
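This function is exposed as pandas.util.hash_pandas_object; a brief usage sketch:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# One uint64 hash per row; the index participates unless index=False.
h = pd.util.hash_pandas_object(df)
h_no_index = pd.util.hash_pandas_object(df, index=False)
print(h.dtype)               # uint64
print(h.equals(h_no_index))  # False in general, since the index hash is mixed in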
173,369
from __future__ import annotations import types from typing import ( TYPE_CHECKING, Callable, ) import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError GLOBAL_USE_NUMBA: bool = False The provided code snippet includes necessary dependencies for implementing the `maybe_use_numba` function. Write a Python function `def maybe_use_numba(engine: str | None) -> bool` to solve the following problem: Signal whether to use numba routines. Here is the function: def maybe_use_numba(engine: str | None) -> bool: """Signal whether to use numba routines.""" return engine == "numba" or (engine is None and GLOBAL_USE_NUMBA)
Signal whether to use numba routines.
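Calling the helper as defined above, it returns True only when the caller explicitly requests numba or the module-level flag has been switched on:

assert maybe_use_numba("numba") is True
assert maybe_use_numba("cython") is False
assert maybe_use_numba(None) is False   # GLOBAL_USE_NUMBA defaults to False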
173,370
from __future__ import annotations import types from typing import ( TYPE_CHECKING, Callable, ) import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError class NumbaUtilError(Exception): """ Error raised for unsupported Numba engine routines. """ The provided code snippet includes necessary dependencies for implementing the `get_jit_arguments` function. Write a Python function `def get_jit_arguments( engine_kwargs: dict[str, bool] | None = None, kwargs: dict | None = None ) -> dict[str, bool]` to solve the following problem: Return arguments to pass to numba.JIT, falling back on pandas default JIT settings. Parameters ---------- engine_kwargs : dict, default None user passed keyword arguments for numba.JIT kwargs : dict, default None user passed keyword arguments to pass into the JITed function Returns ------- dict[str, bool] nopython, nogil, parallel Raises ------ NumbaUtilError Here is the function: def get_jit_arguments( engine_kwargs: dict[str, bool] | None = None, kwargs: dict | None = None ) -> dict[str, bool]: """ Return arguments to pass to numba.JIT, falling back on pandas default JIT settings. Parameters ---------- engine_kwargs : dict, default None user passed keyword arguments for numba.JIT kwargs : dict, default None user passed keyword arguments to pass into the JITed function Returns ------- dict[str, bool] nopython, nogil, parallel Raises ------ NumbaUtilError """ if engine_kwargs is None: engine_kwargs = {} nopython = engine_kwargs.get("nopython", True) if kwargs and nopython: raise NumbaUtilError( "numba does not support kwargs with nopython=True: " "https://github.com/numba/numba/issues/2916" ) nogil = engine_kwargs.get("nogil", False) parallel = engine_kwargs.get("parallel", False) return {"nopython": nopython, "nogil": nogil, "parallel": parallel}
Return arguments to pass to numba.JIT, falling back on pandas default JIT settings. Parameters ---------- engine_kwargs : dict, default None user passed keyword arguments for numba.JIT kwargs : dict, default None user passed keyword arguments to pass into the JITed function Returns ------- dict[str, bool] nopython, nogil, parallel Raises ------ NumbaUtilError
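A behaviour sketch based on the body shown above:

print(get_jit_arguments())
# {'nopython': True, 'nogil': False, 'parallel': False}

print(get_jit_arguments({"nopython": True, "parallel": True}))
# {'nopython': True, 'nogil': False, 'parallel': True}

# Passing user kwargs while nopython is True raises NumbaUtilError, per the guard above.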
173,371
from __future__ import annotations import codecs from functools import wraps import re from typing import ( TYPE_CHECKING, Callable, Hashable, Literal, cast, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( AlignJoin, DtypeObj, F, Scalar, ) from pandas.util._decorators import Appender from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_integer, is_list_like, is_object_dtype, is_re, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCMultiIndex, ABCSeries, ) from pandas.core.dtypes.missing import isna from pandas.core.base import NoNewAttributesMixin from pandas.core.construction import extract_array if TYPE_CHECKING: from pandas import ( DataFrame, Index, Series, ) _shared_docs: dict[str, str] = {} _cpython_optimized_encoders = ( "utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii", ) _cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32") def forbid_nonstring_types( forbidden: list[str] | None, name: str | None = None ) -> Callable[[F], F]: def cat_safe(list_of_columns: list, sep: str): def cat_core(list_of_columns: list, sep: str): def _result_dtype(arr): def _get_single_group_name(regex: re.Pattern) -> Hashable: def _get_group_names(regex: re.Pattern) -> list[Hashable]: def str_extractall(arr, pat, flags: int = 0): def _map_and_wrap(name, docstring): @forbid_nonstring_types(["bytes"], name=name) def wrapper(self): result = getattr(self._data.array, f"_str_{name}")() return self._wrap_result(result) wrapper.__doc__ = docstring return wrapper
null
173,372
The provided code snippet includes necessary dependencies for implementing the `cat_safe` function. Write a Python function `def cat_safe(list_of_columns: list, sep: str)` to solve the following problem: Auxiliary function for :meth:`str.cat`. Same signature as cat_core, but handles TypeErrors in concatenation, which happen if the arrays in list_of columns have the wrong dtypes or content. Parameters ---------- list_of_columns : list of numpy arrays List of arrays to be concatenated with sep; these arrays may not contain NaNs! sep : string The separator string for concatenating the columns. Returns ------- nd.array The concatenation of list_of_columns with sep. Here is the function: def cat_safe(list_of_columns: list, sep: str): """ Auxiliary function for :meth:`str.cat`. Same signature as cat_core, but handles TypeErrors in concatenation, which happen if the arrays in list_of columns have the wrong dtypes or content. Parameters ---------- list_of_columns : list of numpy arrays List of arrays to be concatenated with sep; these arrays may not contain NaNs! sep : string The separator string for concatenating the columns. Returns ------- nd.array The concatenation of list_of_columns with sep. """ try: result = cat_core(list_of_columns, sep) except TypeError: # if there are any non-string values (wrong dtype or hidden behind # object dtype), np.sum will fail; catch and return with better message for column in list_of_columns: dtype = lib.infer_dtype(column, skipna=True) if dtype not in ["string", "empty"]: raise TypeError( "Concatenation requires list-likes containing only " "strings (or missing values). Offending values found in " f"column {dtype}" ) from None return result
Auxiliary function for :meth:`str.cat`. Same signature as cat_core, but handles TypeErrors in concatenation, which happen if the arrays in list_of_columns have the wrong dtypes or content. Parameters ---------- list_of_columns : list of numpy arrays List of arrays to be concatenated with sep; these arrays may not contain NaNs! sep : string The separator string for concatenating the columns. Returns ------- np.ndarray The concatenation of list_of_columns with sep.
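For context, the user-facing method named in the docstring above is Series.str.cat; a short sketch of the happy path and the failure mode this helper guards:

import pandas as pd

s = pd.Series(["a", "b", "c"])
print(s.str.cat(["1", "2", "3"], sep="-").tolist())   # ['a-1', 'b-2', 'c-3']

# Mixing in non-string data is the case cat_safe intercepts, re-raising the
# TypeError with a message naming the offending inferred dtype.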
173,373
class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... def _get_single_group_name(regex: re.Pattern) -> Hashable: if regex.groupindex: return next(iter(regex.groupindex)) else: return None
null
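Called with a compiled pattern, the helper returns the first named group, or None when the pattern has no named groups:

import re

print(_get_single_group_name(re.compile(r"(?P<letter>[ab])\d")))   # 'letter'
print(_get_single_group_name(re.compile(r"([ab])\d")))             # None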
173,374
from __future__ import annotations import codecs from functools import wraps import re from typing import ( TYPE_CHECKING, Callable, Hashable, Literal, cast, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( AlignJoin, DtypeObj, F, Scalar, ) from pandas.util._decorators import Appender from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_integer, is_list_like, is_object_dtype, is_re, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCMultiIndex, ABCSeries, ) from pandas.core.dtypes.missing import isna from pandas.core.base import NoNewAttributesMixin from pandas.core.construction import extract_array if TYPE_CHECKING: from pandas import ( DataFrame, Index, Series, ) _shared_docs: dict[str, str] = {} _cpython_optimized_encoders = ( "utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii", ) _cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32") ) ) _shared_docs[ "str_partition" ] = """ Split the string at the %(side)s occurrence of `sep`. This method splits the string at the %(side)s occurrence of `sep`, and returns 3 elements containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return %(return)s. Parameters ---------- sep : str, default whitespace String to split on. expand : bool, default True If True, return DataFrame/MultiIndex expanding dimensionality. If False, return Series/Index. Returns ------- DataFrame/MultiIndex or Series/Index of objects See Also -------- %(also)s Series.str.split : Split strings around given separators. str.partition : Standard library version. Examples -------- >>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers']) >>> s 0 Linda van der Berg 1 George Pitt-Rivers dtype: object >>> s.str.partition() 0 1 2 0 Linda van der Berg 1 George Pitt-Rivers To partition by the last space instead of the first one: >>> s.str.rpartition() 0 1 2 0 Linda van der Berg 1 George Pitt-Rivers To partition by something different than a space: >>> s.str.partition('-') 0 1 2 0 Linda van der Berg 1 George Pitt - Rivers To return a Series containing tuples instead of a DataFrame: >>> s.str.partition('-', expand=False) 0 (Linda van der Berg, , ) 1 (George Pitt, -, Rivers) dtype: object Also available on indices: >>> idx = pd.Index(['X 123', 'Y 999']) >>> idx Index(['X 123', 'Y 999'], dtype='object') Which will create a MultiIndex: >>> idx.str.partition() MultiIndex([('X', ' ', '123'), ('Y', ' ', '999')], ) Or an index with tuples with ``expand=False``: >>> idx.str.partition(expand=False) Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object') """ _shared_docs["str_partition"] % { "side": "first", "return": "3 elements containing the string itself, followed by two " "empty strings", "also": "rpartition : Split the string at the last occurrence of `sep`.", } ) ) _shared_docs[ "str_pad" ] = """ Pad %(side)s side of strings in the Series/Index. Equivalent to :meth:`str.%(method)s`. Parameters ---------- width : int Minimum width of resulting string; additional characters will be filled with ``fillchar``. fillchar : str Additional character for filling, default is whitespace. Returns ------- Series/Index of objects. """ _shared_docs[ "str_strip" ] = r""" Remove %(position)s characters. Strip whitespaces (including newlines) or a set of specified characters from each string in the Series/Index from %(side)s. 
Replaces any non-strings in Series with NaNs. Equivalent to :meth:`str.%(method)s`. Parameters ---------- to_strip : str or None, default None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. Returns ------- Series or Index of object See Also -------- Series.str.strip : Remove leading and trailing characters in Series/Index. Series.str.lstrip : Remove leading characters in Series/Index. Series.str.rstrip : Remove trailing characters in Series/Index. Examples -------- >>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan, 10, True]) >>> s 0 1. Ant. 1 2. Bee!\n 2 3. Cat?\t 3 NaN 4 10 5 True dtype: object >>> s.str.strip() 0 1. Ant. 1 2. Bee! 2 3. Cat? 3 NaN 4 NaN 5 NaN dtype: object >>> s.str.lstrip('123.') 0 Ant. 1 Bee!\n 2 Cat?\t 3 NaN 4 NaN 5 NaN dtype: object >>> s.str.rstrip('.!? \n\t') 0 1. Ant 1 2. Bee 2 3. Cat 3 NaN 4 NaN 5 NaN dtype: object >>> s.str.strip('123.!? \n\t') 0 Ant 1 Bee 2 Cat 3 NaN 4 NaN 5 NaN dtype: object """ _shared_docs["str_strip"] % { "side": "left and right sides", "method": "strip", "position": "leading and trailing", } ) ) ) _shared_docs[ "str_removefix" ] = r""" Remove a %(side)s from an object series. If the %(side)s is not present, the original string will be returned. Parameters ---------- %(side)s : str Remove the %(side)s of the string. Returns ------- Series/Index: object The Series or Index with given %(side)s removed. See Also -------- Series.str.remove%(other_side)s : Remove a %(other_side)s from an object series. Examples -------- >>> s = pd.Series(["str_foo", "str_bar", "no_prefix"]) >>> s 0 str_foo 1 str_bar 2 no_prefix dtype: object >>> s.str.removeprefix("str_") 0 foo 1 bar 2 no_prefix dtype: object >>> s = pd.Series(["foo_str", "bar_str", "no_suffix"]) >>> s 0 foo_str 1 bar_str 2 no_suffix dtype: object >>> s.str.removesuffix("_str") 0 foo 1 bar 2 no_suffix dtype: object """ _shared_docs["str_removefix"] % {"side": "prefix", "other_side": "suffix"} ) ) def findall(self, pat, flags: int = 0): """ Find all occurrences of pattern or regular expression in the Series/Index. Equivalent to applying :func:`re.findall` to all the elements in the Series/Index. Parameters ---------- pat : str Pattern or regular expression. flags : int, default 0 Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which means no flags). Returns ------- Series/Index of lists of strings All non-overlapping matches of pattern or regular expression in each string of this Series/Index. See Also -------- count : Count occurrences of pattern or regular expression in each string of the Series/Index. extractall : For each string in the Series, extract groups from all matches of regular expression and return a DataFrame with one row for each match and one column for each group. re.findall : The equivalent ``re`` function to all non-overlapping matches of pattern or regular expression in string, as a list of strings. Examples -------- >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit']) The search for the pattern 'Monkey' returns one match: >>> s.str.findall('Monkey') 0 [] 1 [Monkey] 2 [] dtype: object On the other hand, the search for the pattern 'MONKEY' doesn't return any match: >>> s.str.findall('MONKEY') 0 [] 1 [] 2 [] dtype: object Flags can be added to the pattern or regular expression. 
For instance, to find the pattern 'MONKEY' ignoring the case: >>> import re >>> s.str.findall('MONKEY', flags=re.IGNORECASE) 0 [] 1 [Monkey] 2 [] dtype: object When the pattern matches more than one string in the Series, all matches are returned: >>> s.str.findall('on') 0 [on] 1 [on] 2 [] dtype: object Regular expressions are supported too. For instance, the search for all the strings ending with the word 'on' is shown next: >>> s.str.findall('on$') 0 [on] 1 [] 2 [] dtype: object If the pattern is found more than once in the same string, then a list of multiple strings is returned: >>> s.str.findall('b') 0 [] 1 [] 2 [b, b] dtype: object """ result = self._data.array._str_findall(pat, flags) return self._wrap_result(result, returns_string=False) _shared_docs[ "find" ] = """ Return %(side)s indexes in each strings in the Series/Index. Each of returned indexes corresponds to the position where the substring is fully contained between [start:end]. Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`. Parameters ---------- sub : str Substring being searched. start : int Left edge index. end : int Right edge index. Returns ------- Series or Index of int. See Also -------- %(also)s """ _shared_docs["find"] % { "side": "lowest", "method": "find", "also": "rfind : Return highest indexes in each strings.", } ) ) _shared_docs[ "index" ] = """ Return %(side)s indexes in each string in Series/Index. Each of the returned indexes corresponds to the position where the substring is fully contained between [start:end]. This is the same as ``str.%(similar)s`` except instead of returning -1, it raises a ValueError when the substring is not found. Equivalent to standard ``str.%(method)s``. Parameters ---------- sub : str Substring being searched. start : int Left edge index. end : int Right edge index. Returns ------- Series or Index of object See Also -------- %(also)s """ _shared_docs["index"] % { "side": "lowest", "similar": "find", "method": "index", "also": "rindex : Return highest indexes in each strings.", } ) def index(self, sub, start: int = 0, end=None): if not isinstance(sub, str): msg = f"expected a string object, not {type(sub).__name__}" raise TypeError(msg) result = self._data.array._str_index(sub, start=start, end=end) return self._wrap_result(result, returns_string=False) _shared_docs["index"] % { "side": "highest", "similar": "rfind", "method": "rindex", "also": "index : Return lowest indexes in each strings.", } ) _shared_docs[ "casemethods" ] = """ Convert strings in the Series/Index to %(type)s. %(version)s Equivalent to :meth:`str.%(method)s`. Returns ------- Series or Index of object See Also -------- Series.str.lower : Converts all characters to lowercase. Series.str.upper : Converts all characters to uppercase. Series.str.title : Converts first character of each word to uppercase and remaining to lowercase. Series.str.capitalize : Converts first character to uppercase and remaining to lowercase. Series.str.swapcase : Converts uppercase to lowercase and lowercase to uppercase. Series.str.casefold: Removes all case distinctions in the string. 
Examples -------- >>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']) >>> s 0 lower 1 CAPITALS 2 this is a sentence 3 SwApCaSe dtype: object >>> s.str.lower() 0 lower 1 capitals 2 this is a sentence 3 swapcase dtype: object >>> s.str.upper() 0 LOWER 1 CAPITALS 2 THIS IS A SENTENCE 3 SWAPCASE dtype: object >>> s.str.title() 0 Lower 1 Capitals 2 This Is A Sentence 3 Swapcase dtype: object >>> s.str.capitalize() 0 Lower 1 Capitals 2 This is a sentence 3 Swapcase dtype: object >>> s.str.swapcase() 0 LOWER 1 capitals 2 THIS IS A SENTENCE 3 sWaPcAsE dtype: object """ # Types: # cases: # upper, lower, title, capitalize, swapcase, casefold # boolean: # isalpha, isnumeric isalnum isdigit isdecimal isspace islower isupper istitle # _doc_args holds dict of strings to use in substituting casemethod docs _doc_args: dict[str, dict[str, str]] = {} _doc_args["lower"] = {"type": "lowercase", "method": "lower", "version": ""} _doc_args["upper"] = {"type": "uppercase", "method": "upper", "version": ""} _doc_args["title"] = {"type": "titlecase", "method": "title", "version": ""} _doc_args["capitalize"] = { "type": "be capitalized", "method": "capitalize", "version": "", } _doc_args["swapcase"] = { "type": "be swapcased", "method": "swapcase", "version": "", } _doc_args["casefold"] = { "type": "be casefolded", "method": "casefold", "version": "", } _shared_docs[ "ismethods" ] = """ Check whether all characters in each string are %(type)s. This is equivalent to running the Python string method :meth:`str.%(method)s` for each element of the Series/Index. If a string has zero characters, ``False`` is returned for that check. Returns ------- Series or Index of bool Series or Index of boolean values with the same length as the original Series/Index. See Also -------- Series.str.isalpha : Check whether all characters are alphabetic. Series.str.isnumeric : Check whether all characters are numeric. Series.str.isalnum : Check whether all characters are alphanumeric. Series.str.isdigit : Check whether all characters are digits. Series.str.isdecimal : Check whether all characters are decimal. Series.str.isspace : Check whether all characters are whitespace. Series.str.islower : Check whether all characters are lowercase. Series.str.isupper : Check whether all characters are uppercase. Series.str.istitle : Check whether all characters are titlecase. Examples -------- **Checks for Alphabetic and Numeric Characters** >>> s1 = pd.Series(['one', 'one1', '1', '']) >>> s1.str.isalpha() 0 True 1 False 2 False 3 False dtype: bool >>> s1.str.isnumeric() 0 False 1 False 2 True 3 False dtype: bool >>> s1.str.isalnum() 0 True 1 True 2 True 3 False dtype: bool Note that checks against characters mixed with any additional punctuation or whitespace will evaluate to false for an alphanumeric check. >>> s2 = pd.Series(['A B', '1.5', '3,000']) >>> s2.str.isalnum() 0 False 1 False 2 False dtype: bool **More Detailed Checks for Numeric Characters** There are several different but overlapping sets of numeric characters that can be checked for. >>> s3 = pd.Series(['23', '³', '⅕', '']) The ``s3.str.isdecimal`` method checks for characters used to form numbers in base 10. >>> s3.str.isdecimal() 0 True 1 False 2 False 3 False dtype: bool The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also includes special digits, like superscripted and subscripted digits in unicode. 
>>> s3.str.isdigit() 0 True 1 True 2 False 3 False dtype: bool The ``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also includes other characters that can represent quantities such as unicode fractions. >>> s3.str.isnumeric() 0 True 1 True 2 True 3 False dtype: bool **Checks for Whitespace** >>> s4 = pd.Series([' ', '\\t\\r\\n ', '']) >>> s4.str.isspace() 0 True 1 True 2 False dtype: bool **Checks for Character Case** >>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', '']) >>> s5.str.islower() 0 True 1 False 2 False 3 False dtype: bool >>> s5.str.isupper() 0 False 1 False 2 True 3 False dtype: bool The ``s5.str.istitle`` method checks for whether all words are in title case (whether only the first letter of each word is capitalized). Words are assumed to be as any sequence of non-numeric characters separated by whitespace characters. >>> s5.str.istitle() 0 False 1 True 2 False 3 False dtype: bool """ _doc_args["isalnum"] = {"type": "alphanumeric", "method": "isalnum"} _doc_args["isalpha"] = {"type": "alphabetic", "method": "isalpha"} _doc_args["isdigit"] = {"type": "digits", "method": "isdigit"} _doc_args["isspace"] = {"type": "whitespace", "method": "isspace"} _doc_args["islower"] = {"type": "lowercase", "method": "islower"} _doc_args["isupper"] = {"type": "uppercase", "method": "isupper"} _doc_args["istitle"] = {"type": "titlecase", "method": "istitle"} _doc_args["isnumeric"] = {"type": "numeric", "method": "isnumeric"} _doc_args["isdecimal"] = {"type": "decimal", "method": "isdecimal"} # force _noarg_wrapper return type with dtype=np.dtype(bool) (GH 29624) isalnum = _map_and_wrap( "isalnum", docstring=_shared_docs["ismethods"] % _doc_args["isalnum"] ) isalpha = _map_and_wrap( "isalpha", docstring=_shared_docs["ismethods"] % _doc_args["isalpha"] ) isdigit = _map_and_wrap( "isdigit", docstring=_shared_docs["ismethods"] % _doc_args["isdigit"] ) isspace = _map_and_wrap( "isspace", docstring=_shared_docs["ismethods"] % _doc_args["isspace"] ) islower = _map_and_wrap( "islower", docstring=_shared_docs["ismethods"] % _doc_args["islower"] ) isupper = _map_and_wrap( "isupper", docstring=_shared_docs["ismethods"] % _doc_args["isupper"] ) istitle = _map_and_wrap( "istitle", docstring=_shared_docs["ismethods"] % _doc_args["istitle"] ) isnumeric = _map_and_wrap( "isnumeric", docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"] ) isdecimal = _map_and_wrap( "isdecimal", docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"] ) def cat_safe(list_of_columns: list, sep: str): """ Auxiliary function for :meth:`str.cat`. Same signature as cat_core, but handles TypeErrors in concatenation, which happen if the arrays in list_of columns have the wrong dtypes or content. Parameters ---------- list_of_columns : list of numpy arrays List of arrays to be concatenated with sep; these arrays may not contain NaNs! sep : string The separator string for concatenating the columns. Returns ------- nd.array The concatenation of list_of_columns with sep. """ try: result = cat_core(list_of_columns, sep) except TypeError: # if there are any non-string values (wrong dtype or hidden behind # object dtype), np.sum will fail; catch and return with better message for column in list_of_columns: dtype = lib.infer_dtype(column, skipna=True) if dtype not in ["string", "empty"]: raise TypeError( "Concatenation requires list-likes containing only " "strings (or missing values). 
Offending values found in " f"column {dtype}" ) from None return result def cat_core(list_of_columns: list, sep: str): """ Auxiliary function for :meth:`str.cat` Parameters ---------- list_of_columns : list of numpy arrays List of arrays to be concatenated with sep; these arrays may not contain NaNs! sep : string The separator string for concatenating the columns. Returns ------- nd.array The concatenation of list_of_columns with sep. """ if sep == "": # no need to interleave sep if it is empty arr_of_cols = np.asarray(list_of_columns, dtype=object) return np.sum(arr_of_cols, axis=0) list_with_sep = [sep] * (2 * len(list_of_columns) - 1) list_with_sep[::2] = list_of_columns arr_with_sep = np.asarray(list_with_sep, dtype=object) return np.sum(arr_with_sep, axis=0) def _result_dtype(arr): # workaround #27953 # ideally we just pass `dtype=arr.dtype` unconditionally, but this fails # when the list of values is empty. from pandas.core.arrays.string_ import StringDtype if isinstance(arr.dtype, StringDtype): return arr.dtype else: return object def _get_single_group_name(regex: re.Pattern) -> Hashable: if regex.groupindex: return next(iter(regex.groupindex)) else: return None def _get_group_names(regex: re.Pattern) -> list[Hashable]: """ Get named groups from compiled regex. Unnamed groups are numbered. Parameters ---------- regex : compiled regex Returns ------- list of column labels """ names = {v: k for k, v in regex.groupindex.items()} return [names.get(1 + i, i) for i in range(regex.groups)] def str_extractall(arr, pat, flags: int = 0): regex = re.compile(pat, flags=flags) # the regex must contain capture groups. if regex.groups == 0: raise ValueError("pattern contains no capture groups") if isinstance(arr, ABCIndex): arr = arr.to_series().reset_index(drop=True) columns = _get_group_names(regex) match_list = [] index_list = [] is_mi = arr.index.nlevels > 1 for subject_key, subject in arr.items(): if isinstance(subject, str): if not is_mi: subject_key = (subject_key,) for match_i, match_tuple in enumerate(regex.findall(subject)): if isinstance(match_tuple, str): match_tuple = (match_tuple,) na_tuple = [np.NaN if group == "" else group for group in match_tuple] match_list.append(na_tuple) result_key = tuple(subject_key + (match_i,)) index_list.append(result_key) from pandas import MultiIndex index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"]) dtype = _result_dtype(arr) result = arr._constructor_expanddim( match_list, index=index, columns=columns, dtype=dtype ) return result ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) def str_extractall(arr, pat, flags: int = 0): regex = re.compile(pat, flags=flags) # the regex must contain capture groups. 
if regex.groups == 0: raise ValueError("pattern contains no capture groups") if isinstance(arr, ABCIndex): arr = arr.to_series().reset_index(drop=True) columns = _get_group_names(regex) match_list = [] index_list = [] is_mi = arr.index.nlevels > 1 for subject_key, subject in arr.items(): if isinstance(subject, str): if not is_mi: subject_key = (subject_key,) for match_i, match_tuple in enumerate(regex.findall(subject)): if isinstance(match_tuple, str): match_tuple = (match_tuple,) na_tuple = [np.NaN if group == "" else group for group in match_tuple] match_list.append(na_tuple) result_key = tuple(subject_key + (match_i,)) index_list.append(result_key) from pandas import MultiIndex index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"]) dtype = _result_dtype(arr) result = arr._constructor_expanddim( match_list, index=index, columns=columns, dtype=dtype ) return result
null
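As a quick, hedged illustration of what the str_extractall machinery in the row above produces, here is a usage sketch through the public Series.str.extractall accessor; the sample Series, index labels, and pattern are invented for the example and are not part of the original row:

import pandas as pd

s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])

# Named capture groups become columns; every match becomes a row, and the
# result index gains an extra "match" level counting matches per subject.
result = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
print(result)
# "a1a2" contributes two rows (match 0 and 1), "b1" one row,
# and "c1" drops out because it has no match at all.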
173,375
from __future__ import annotations

from typing import (
    Any,
    TypeVar,
    cast,
    overload,
)

from numpy import ndarray

from pandas._libs.lib import (
    is_bool,
    is_integer,
)
from pandas._typing import (
    Axis,
    AxisInt,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
    validate_args,
    validate_args_and_kwargs,
    validate_kwargs,
)

validate_argmin = CompatValidator(
    ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
)

def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]:
    if isinstance(skipna, ndarray) or skipna is None:
        args = (skipna,) + args
        skipna = True
    return skipna, args

The provided code snippet includes necessary dependencies for implementing the `validate_argmin_with_skipna` function.

Write a Python function `def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool` to solve the following problem:
If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean

Here is the function:
def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
    """
    If 'Series.argmin' is called via the 'numpy' library, the third parameter
    in its signature is 'out', which takes either an ndarray or 'None', so
    check if the 'skipna' parameter is either an instance of ndarray or is
    None, since 'skipna' itself should be a boolean
    """
    skipna, args = process_skipna(skipna, args)
    validate_argmin(args, kwargs)
    return skipna
If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean
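A small sketch of the argument shuffle the row above implements, calling the validator directly through the conventional nv alias; the literal argument values are invented for illustration:

from pandas.compat.numpy import function as nv

# numpy's argmin signature can push out=None into the skipna slot; the
# validator shifts it into args and restores pandas' default skipna=True.
assert nv.validate_argmin_with_skipna(None, (), {}) is True

# A genuine boolean skipna passes through unchanged.
assert nv.validate_argmin_with_skipna(False, (), {}) is False

The same shuffle applies verbatim to validate_argmax_with_skipna in the next row.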
173,376
from __future__ import annotations

from typing import (
    Any,
    TypeVar,
    cast,
    overload,
)

from numpy import ndarray

from pandas._libs.lib import (
    is_bool,
    is_integer,
)
from pandas._typing import (
    Axis,
    AxisInt,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
    validate_args,
    validate_args_and_kwargs,
    validate_kwargs,
)

validate_argmax = CompatValidator(
    ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1
)

def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]:
    if isinstance(skipna, ndarray) or skipna is None:
        args = (skipna,) + args
        skipna = True
    return skipna, args

The provided code snippet includes necessary dependencies for implementing the `validate_argmax_with_skipna` function.

Write a Python function `def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool` to solve the following problem:
If 'Series.argmax' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean

Here is the function:
def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
    """
    If 'Series.argmax' is called via the 'numpy' library, the third parameter
    in its signature is 'out', which takes either an ndarray or 'None', so
    check if the 'skipna' parameter is either an instance of ndarray or is
    None, since 'skipna' itself should be a boolean
    """
    skipna, args = process_skipna(skipna, args)
    validate_argmax(args, kwargs)
    return skipna
If 'Series.argmax' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean
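Complementing the previous sketch, the other half of the compat validation is the rejection of numpy-only arguments that carry non-default values; in the pandas versions I have checked this surfaces as a ValueError naming the unsupported parameter, but treat the exact exception type and message as an assumption rather than something stated in the row above:

import numpy as np
from pandas.compat.numpy import function as nv

# out=None is numpy's default, so validation passes and skipna is returned.
assert nv.validate_argmax_with_skipna(True, (), {"out": None}) is True

# A concrete 'out' buffer is not supported by the pandas implementation.
try:
    nv.validate_argmax_with_skipna(True, (), {"out": np.empty(3)})
except ValueError as err:
    print(err)  # e.g. "the 'out' parameter is not supported ..."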
173,377
from __future__ import annotations

from typing import (
    Any,
    TypeVar,
    cast,
    overload,
)

from numpy import ndarray

from pandas._libs.lib import (
    is_bool,
    is_integer,
)
from pandas._typing import (
    Axis,
    AxisInt,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
    validate_args,
    validate_args_and_kwargs,
    validate_kwargs,
)

validate_argsort_kind = CompatValidator(
    ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both"
)

def cast(typ: Type[_T], val: Any) -> _T:
    ...

def cast(typ: str, val: Any) -> Any:
    ...

def cast(typ: object, val: Any) -> Any:
    ...

The provided code snippet includes necessary dependencies for implementing the `validate_argsort_with_ascending` function.

Write a Python function `def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool` to solve the following problem:
If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or 'None', so check if the 'ascending' parameter has either integer type or is None, since 'ascending' itself should be a boolean

Here is the function:
def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool:
    """
    If 'Categorical.argsort' is called via the 'numpy' library, the first
    parameter in its signature is 'axis', which takes either an integer or
    'None', so check if the 'ascending' parameter has either integer type or
    is None, since 'ascending' itself should be a boolean
    """
    if is_integer(ascending) or ascending is None:
        args = (ascending,) + args
        ascending = True

    validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
    ascending = cast(bool, ascending)
    return ascending
If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or 'None', so check if the 'ascending' parameter has either integer type or is None, since 'ascending' itself should be a boolean
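For the argsort row above the same idea applies, but the misplaced numpy parameter is an integer 'axis' rather than an ndarray 'out'; this sketch assumes the compat defaults mirror numpy's argsort signature (axis defaulting to -1), which is not shown in the row itself:

from pandas.compat.numpy import function as nv

# An integer in the 'ascending' slot is treated as numpy's axis argument:
# it is shifted into args and the pandas default ascending=True is restored.
assert nv.validate_argsort_with_ascending(-1, (), {}) is True

# Real booleans pass through (and are cast back to bool for the return).
assert nv.validate_argsort_with_ascending(False, (), {}) is False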
173,378
from __future__ import annotations

from typing import (
    Any,
    TypeVar,
    cast,
    overload,
)

from numpy import ndarray

from pandas._libs.lib import (
    is_bool,
    is_integer,
)
from pandas._typing import (
    Axis,
    AxisInt,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
    validate_args,
    validate_args_and_kwargs,
    validate_kwargs,
)

def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None:
    ...
null
173,379
from __future__ import annotations

from typing import (
    Any,
    TypeVar,
    cast,
    overload,
)

from numpy import ndarray

from pandas._libs.lib import (
    is_bool,
    is_integer,
)
from pandas._typing import (
    Axis,
    AxisInt,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
    validate_args,
    validate_args_and_kwargs,
    validate_kwargs,
)

AxisNoneT = TypeVar("AxisNoneT", Axis, None)

def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT:
    ...
null
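The two rows above expose only typing overload stubs for validate_clip_with_axis. Below is a hedged, self-contained sketch of a runtime body consistent with both signatures, mirroring the process_skipna pattern from the earlier rows; the Axis alias and the validate_clip placeholder are simplified stand-ins for pandas internals, and the real pandas body may differ:

from typing import TypeVar, Union, overload

from numpy import ndarray

# Stand-ins so the sketch runs on its own; in pandas, Axis is a typing alias
# and validate_clip is a CompatValidator instance like validate_argmin above.
Axis = Union[int, str]
AxisNoneT = TypeVar("AxisNoneT", Axis, None)


def validate_clip(args, kwargs) -> None:
    # Simplified placeholder: numpy-only arguments (e.g. 'out') are tolerated
    # only when left at their numpy default of None.
    for value in tuple(args) + tuple(kwargs.values()):
        if value is not None:
            raise ValueError("numpy-only arguments to clip must be None")


@overload
def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None:
    ...


@overload
def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT:
    ...


def validate_clip_with_axis(axis, args, kwargs):
    # When numpy's clip is routed to pandas, its positional 'out' can land in
    # the 'axis' slot; an ndarray there really means 'out', so shift it into
    # args (where validation can name it correctly) and fall back to axis=None.
    if isinstance(axis, ndarray):
        args = (axis,) + args
        axis = None
    validate_clip(args, kwargs)
    return axis


print(validate_clip_with_axis(0, (), {}))          # 0 passes straight through
print(validate_clip_with_axis(None, (None,), {}))  # None; default values validate fine

Shifting the misplaced ndarray into args before validating presumably lets the eventual error message complain about the numpy-only 'out' parameter instead of a malformed 'axis'.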