title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Update mangle_dupe_cols documentation to reflect actual state of implementation | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 030ae9fefda98..4d45ff208ae12 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -275,8 +275,8 @@
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
- 'X'...'X'. Passing in False will cause data to be overwritten if there
- are duplicate names in the columns.
+ 'X'...'X'. Passing in False will in the future cause data to be overwritten
+ if there are duplicate names in the columns, but is currently not permitted.
{storage_options}
.. versionadded:: 1.2.0
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index f853251d599d2..6a086bc251efe 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -159,8 +159,8 @@
Use a list comprehension on the DataFrame's columns after calling ``read_csv``.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
- 'X'...'X'. Passing in False will cause data to be overwritten if there
- are duplicate names in the columns.
+ 'X'...'X'. Passing in False will in the future cause data to be overwritten
+ if there are duplicate names in the columns, but is currently not permitted.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
| - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This pull request updates documentation to reflect #13262 being unresolved. | https://api.github.com/repos/pandas-dev/pandas/pulls/47046 | 2022-05-17T23:44:21Z | 2022-06-28T17:27:04Z | null | 2022-06-28T17:27:05Z |
TYP: Mypy workaround for NoDefault | diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index ad77e9e533b0b..d4a766f7086af 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -23,9 +23,11 @@ ndarray_obj_2d = np.ndarray
from enum import Enum
-class NoDefault(Enum): ...
+class _NoDefault(Enum):
+ no_default = ...
-no_default: NoDefault
+no_default = _NoDefault.no_default
+NoDefault = Literal[_NoDefault.no_default]
i8max: int
u8max: int
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 13bd95004445d..4e245d1bd8693 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1,6 +1,7 @@
from collections import abc
from decimal import Decimal
from enum import Enum
+from typing import Literal
import warnings
cimport cython
@@ -2791,7 +2792,7 @@ cdef _infer_all_nats(dtype, ndarray datetimes, ndarray timedeltas):
return result
-class NoDefault(Enum):
+class _NoDefault(Enum):
# We make this an Enum
# 1) because it round-trips through pickle correctly (see GH#40397)
# 2) because mypy does not understand singletons
@@ -2802,7 +2803,8 @@ class NoDefault(Enum):
# Note: no_default is exported to the public API in pandas.api.extensions
-no_default = NoDefault.no_default # Sentinel indicating the default value.
+no_default = _NoDefault.no_default # Sentinel indicating the default value.
+NoDefault = Literal[_NoDefault.no_default]
@cython.boundscheck(False)
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index ab42fcd92a3d9..efbe9995525d7 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -112,12 +112,7 @@ def assert_almost_equal(
FutureWarning,
stacklevel=find_stack_level(),
)
- # https://github.com/python/mypy/issues/7642
- # error: Argument 1 to "_get_tol_from_less_precise" has incompatible
- # type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
- rtol = atol = _get_tol_from_less_precise(
- check_less_precise # type: ignore[arg-type]
- )
+ rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, Index):
assert_index_equal(
@@ -345,12 +340,7 @@ def _get_ilevel_values(index, level):
FutureWarning,
stacklevel=find_stack_level(),
)
- # https://github.com/python/mypy/issues/7642
- # error: Argument 1 to "_get_tol_from_less_precise" has incompatible
- # type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
- rtol = atol = _get_tol_from_less_precise(
- check_less_precise # type: ignore[arg-type]
- )
+ rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
diff --git a/pandas/_typing.py b/pandas/_typing.py
index e71859e91785e..a85820a403fde 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -313,7 +313,8 @@ def closed(self) -> bool:
XMLParsers = Literal["lxml", "etree"]
# Interval closed type
-IntervalClosedType = Literal["left", "right", "both", "neither"]
+IntervalLeftRight = Literal["left", "right"]
+IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]]
# datetime and NaTType
DatetimeNaTType = Union[datetime, "NaTType"]
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 679feaca71024..4c81fe8b61a1f 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -263,7 +263,7 @@ def _simple_new(
cls: type[IntervalArrayT],
left,
right,
- closed=None,
+ closed: IntervalClosedType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
verify_integrity: bool = True,
@@ -416,7 +416,7 @@ def _from_factorized(
def from_breaks(
cls: type[IntervalArrayT],
breaks,
- closed="right",
+ closed: IntervalClosedType | None = "right",
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
@@ -492,7 +492,7 @@ def from_arrays(
cls: type[IntervalArrayT],
left,
right,
- closed="right",
+ closed: IntervalClosedType | None = "right",
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
@@ -956,10 +956,10 @@ def _concat_same_type(
-------
IntervalArray
"""
- closed = {interval.closed for interval in to_concat}
- if len(closed) != 1:
+ closed_set = {interval.closed for interval in to_concat}
+ if len(closed_set) != 1:
raise ValueError("Intervals must all be closed on the same side.")
- closed = closed.pop()
+ closed = closed_set.pop()
left = np.concatenate([interval.left for interval in to_concat])
right = np.concatenate([interval.right for interval in to_concat])
@@ -1328,7 +1328,7 @@ def overlaps(self, other):
# ---------------------------------------------------------------------
@property
- def closed(self):
+ def closed(self) -> IntervalClosedType:
"""
Whether the intervals are closed on the left-side, right-side, both or
neither.
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 2e8d6dbced4e3..eeb18759fc72c 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -673,7 +673,7 @@ def resolve_numeric_only(numeric_only: bool | None | lib.NoDefault) -> bool:
# first default to None
result = False
else:
- result = cast(bool, numeric_only)
+ result = numeric_only
return result
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d563aa8b06ca5..e4de71067665a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6279,8 +6279,7 @@ def dropna(
# faster equivalent to 'agg_obj.count(agg_axis) > 0'
mask = notna(agg_obj).any(axis=agg_axis, bool_only=False)
else:
- if how is not no_default:
- raise ValueError(f"invalid how option: {how}")
+ raise ValueError(f"invalid how option: {how}")
if np.all(mask):
result = self.copy()
@@ -8050,9 +8049,6 @@ def groupby(
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
- # https://github.com/python/mypy/issues/7642
- # error: Argument "squeeze" to "DataFrameGroupBy" has incompatible type
- # "Union[bool, NoDefault]"; expected "bool"
return DataFrameGroupBy(
obj=self,
keys=by,
@@ -8061,7 +8057,7 @@ def groupby(
as_index=as_index,
sort=sort,
group_keys=group_keys,
- squeeze=squeeze, # type: ignore[arg-type]
+ squeeze=squeeze,
observed=observed,
dropna=dropna,
)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1f524d07bfd3a..9feb6c0512302 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7898,8 +7898,8 @@ def between_time(
FutureWarning,
stacklevel=find_stack_level(),
)
- left = True if isinstance(include_start, lib.NoDefault) else include_start
- right = True if isinstance(include_end, lib.NoDefault) else include_end
+ left = True if include_start is lib.no_default else include_start
+ right = True if include_end is lib.no_default else include_end
inc_dict: dict[tuple[bool_t, bool_t], IntervalClosedType] = {
(True, True): "both",
@@ -10689,7 +10689,6 @@ def _stat_function(
if axis is None:
axis = self._stat_axis_number
- axis = cast(Axis, axis)
if level is not None:
warnings.warn(
"Using the level keyword in DataFrame and Series aggregations is "
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 2acf5c826eb57..090554f2eafe5 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1580,7 +1580,7 @@ def idxmax(
# DataFrame.idxmax for backwards compatibility
numeric_only_arg = None if axis == 0 else False
else:
- numeric_only_arg = cast(bool, numeric_only)
+ numeric_only_arg = numeric_only
def func(df):
res = df._reduce(
@@ -1616,7 +1616,7 @@ def idxmin(
# DataFrame.idxmin for backwards compatibility
numeric_only_arg = None if axis == 0 else False
else:
- numeric_only_arg = cast(bool, numeric_only)
+ numeric_only_arg = numeric_only
def func(df):
res = df._reduce(
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 6cee6c1913f74..ffbee0bf21a66 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1277,9 +1277,7 @@ def _resolve_numeric_only(
else:
numeric_only = False
- # error: Incompatible return value type (got "Union[bool, NoDefault]",
- # expected "bool")
- return numeric_only # type: ignore[return-value]
+ return numeric_only
def _maybe_warn_numeric_only_depr(
self, how: str, result: DataFrame | Series, numeric_only: bool | lib.NoDefault
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 806d081c0176b..e7b810dacdf57 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -34,6 +34,7 @@
Dtype,
DtypeObj,
IntervalClosedType,
+ IntervalLeftRight,
npt,
)
from pandas.util._decorators import (
@@ -1039,12 +1040,12 @@ def date_range(
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
"""
- if inclusive is not None and not isinstance(closed, lib.NoDefault):
+ if inclusive is not None and closed is not lib.no_default:
raise ValueError(
"Deprecated argument `closed` cannot be passed"
"if argument `inclusive` is not None"
)
- elif not isinstance(closed, lib.NoDefault):
+ elif closed is not lib.no_default:
warnings.warn(
"Argument `closed` is deprecated in favor of `inclusive`.",
FutureWarning,
@@ -1087,7 +1088,7 @@ def bdate_range(
name: Hashable = None,
weekmask=None,
holidays=None,
- closed: lib.NoDefault = lib.no_default,
+ closed: IntervalLeftRight | lib.NoDefault | None = lib.no_default,
inclusive: IntervalClosedType | None = None,
**kwargs,
) -> DatetimeIndex:
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index c3acfc5ff2f66..a89b52e0950f2 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -251,7 +251,7 @@ def __new__(
def from_breaks(
cls,
breaks,
- closed: str = "right",
+ closed: IntervalClosedType | None = "right",
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
@@ -282,7 +282,7 @@ def from_arrays(
cls,
left,
right,
- closed: str = "right",
+ closed: IntervalClosedType = "right",
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
@@ -957,7 +957,7 @@ def interval_range(
periods=None,
freq=None,
name: Hashable = None,
- closed: lib.NoDefault = lib.no_default,
+ closed: IntervalClosedType | lib.NoDefault = lib.no_default,
inclusive: IntervalClosedType | None = None,
) -> IntervalIndex:
"""
@@ -1054,12 +1054,12 @@ def interval_range(
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
dtype='interval[int64, both]')
"""
- if inclusive is not None and not isinstance(closed, lib.NoDefault):
+ if inclusive is not None and closed is not lib.no_default:
raise ValueError(
"Deprecated argument `closed` cannot be passed "
"if argument `inclusive` is not None"
)
- elif not isinstance(closed, lib.NoDefault):
+ elif closed is not lib.no_default:
warnings.warn(
"Argument `closed` is deprecated in favor of `inclusive`.",
FutureWarning,
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 29df930c5aaf3..1608260205dd1 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -10,6 +10,7 @@
Hashable,
Iterable,
List,
+ Literal,
Sequence,
Tuple,
cast,
@@ -1397,7 +1398,7 @@ def format(
sparsify = get_option("display.multi_sparse")
if sparsify:
- sentinel = ""
+ sentinel: Literal[""] | bool | lib.NoDefault = ""
# GH3547 use value of sparsify as sentinel if it's "Falsey"
assert isinstance(sparsify, bool) or sparsify is lib.no_default
if sparsify in [False, lib.no_default]:
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index d8c4f3f3da765..94705790e40bd 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -16,6 +16,7 @@
Timestamp,
)
from pandas._libs.lib import infer_dtype
+from pandas._typing import IntervalLeftRight
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
@@ -560,7 +561,7 @@ def _format_labels(
bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None
):
"""based on the dtype, return our labels"""
- closed = "right" if right else "left"
+ closed: IntervalLeftRight = "right" if right else "left"
formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta]
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b740bac78b263..d1514a3872800 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1952,8 +1952,6 @@ def groupby(
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
- # error: Argument "squeeze" to "SeriesGroupBy" has incompatible type
- # "Union[bool, NoDefault]"; expected "bool"
return SeriesGroupBy(
obj=self,
keys=by,
@@ -1962,7 +1960,7 @@ def groupby(
as_index=as_index,
sort=sort,
group_keys=group_keys,
- squeeze=squeeze, # type: ignore[arg-type]
+ squeeze=squeeze,
observed=observed,
dropna=dropna,
)
| This uses the mypy-workaround from https://github.com/python/typeshed/pull/7127/files for `NoDefault` (works also for pyright).
`no_default` is public but `NoDefault` is luckily not public, so it should be fine to do the following:
- rename the existing `NoDefault` to `_NoDefault` (implementation and typing)
- define `NoDefault` as `NoDefault = Literal[_NoDefault.no_default]` (implementation and typing; `isinstance` checks need to be re-written to use `is no_default` but all other type annotations can stay the same)
- `no_default` "remains" `no_default = _NoDefault.no_default` (just replacing `NoDefault` with `_NoDefault`)
@simonjayhawkins @jreback @Dr-Irv
edit:
Some context: The following will work without mypy errors afterwards:
```py
def foo(x: NoDefault | int) -> int:
if x is no_default:
return -1
return x + 1
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/47045 | 2022-05-17T22:35:53Z | 2022-05-25T21:53:55Z | 2022-05-25T21:53:55Z | 2022-05-26T01:58:47Z |
ENH: DatetimeArray fields support non-nano | diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi
index e404eadf13657..b1d9e0342f81e 100644
--- a/pandas/_libs/tslibs/fields.pyi
+++ b/pandas/_libs/tslibs/fields.pyi
@@ -22,6 +22,7 @@ def get_start_end_field(
def get_date_field(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
field: str,
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int32]: ...
def get_timedelta_field(
tdindex: npt.NDArray[np.int64], # const int64_t[:]
@@ -32,6 +33,7 @@ def isleapyear_arr(
) -> npt.NDArray[np.bool_]: ...
def build_isocalendar_sarray(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> np.ndarray: ...
def _get_locale_names(name_type: str, locale: str | None = ...): ...
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 57d4c27b3337d..5865b8c6877b0 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -329,7 +329,7 @@ def get_start_end_field(
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_date_field(const int64_t[:] dtindex, str field):
+def get_date_field(const int64_t[:] dtindex, str field, NPY_DATETIMEUNIT reso=NPY_FR_ns):
"""
Given a int64-based datetime index, extract the year, month, etc.,
field and return an array of these values.
@@ -348,7 +348,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.year
return out
@@ -359,7 +359,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.month
return out
@@ -370,7 +370,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.day
return out
@@ -381,8 +381,9 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.hour
+ # TODO: can we de-dup with period.pyx <accessor>s?
return out
elif field == 'm':
@@ -392,7 +393,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.min
return out
@@ -403,7 +404,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.sec
return out
@@ -414,7 +415,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.us
return out
@@ -425,7 +426,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.ps // 1000
return out
elif field == 'doy':
@@ -435,7 +436,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = get_day_of_year(dts.year, dts.month, dts.day)
return out
@@ -446,7 +447,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dayofweek(dts.year, dts.month, dts.day)
return out
@@ -457,7 +458,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = get_week_of_year(dts.year, dts.month, dts.day)
return out
@@ -468,7 +469,7 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = dts.month
out[i] = ((out[i] - 1) // 3) + 1
return out
@@ -480,11 +481,11 @@ def get_date_field(const int64_t[:] dtindex, str field):
out[i] = -1
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = get_days_in_month(dts.year, dts.month)
return out
elif field == 'is_leap_year':
- return isleapyear_arr(get_date_field(dtindex, 'Y'))
+ return isleapyear_arr(get_date_field(dtindex, 'Y', reso=reso))
raise ValueError(f"Field {field} not supported")
@@ -564,7 +565,7 @@ cpdef isleapyear_arr(ndarray years):
@cython.wraparound(False)
@cython.boundscheck(False)
-def build_isocalendar_sarray(const int64_t[:] dtindex):
+def build_isocalendar_sarray(const int64_t[:] dtindex, NPY_DATETIMEUNIT reso=NPY_FR_ns):
"""
Given a int64-based datetime array, return the ISO 8601 year, week, and day
as a structured array.
@@ -592,7 +593,7 @@ def build_isocalendar_sarray(const int64_t[:] dtindex):
if dtindex[i] == NPY_NAT:
ret_val = 0, 0, 0
else:
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
ret_val = get_iso_calendar(dts.year, dts.month, dts.day)
iso_years[i] = ret_val[0]
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 6f984727f4f6d..dadfad394b903 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -136,7 +136,7 @@ def f(self):
values, field, self.freqstr, month_kw, reso=self._reso
)
else:
- result = fields.get_date_field(values, field)
+ result = fields.get_date_field(values, field, reso=self._reso)
# these return a boolean by-definition
return result
@@ -146,7 +146,7 @@ def f(self):
result = self._maybe_mask_results(result, fill_value=None)
else:
- result = fields.get_date_field(values, field)
+ result = fields.get_date_field(values, field, reso=self._reso)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
@@ -1403,7 +1403,7 @@ def isocalendar(self) -> DataFrame:
from pandas import DataFrame
values = self._local_timestamps()
- sarray = fields.build_isocalendar_sarray(values)
+ sarray = fields.build_isocalendar_sarray(values, reso=self._reso)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 8eb5cc2dd82f6..897528cf18122 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -12,7 +12,15 @@
class TestNonNano:
- @pytest.mark.parametrize("unit,reso", [("s", 7), ("ms", 8), ("us", 9)])
+ @pytest.fixture(params=["s", "ms", "us"])
+ def unit(self, request):
+ return request.param
+
+ @pytest.fixture
+ def reso(self, unit):
+ # TODO: avoid hard-coding
+ return {"s": 7, "ms": 8, "us": 9}[unit]
+
@pytest.mark.xfail(reason="_box_func is not yet patched to get reso right")
def test_non_nano(self, unit, reso):
arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")
@@ -21,6 +29,22 @@ def test_non_nano(self, unit, reso):
assert dta.dtype == arr.dtype
assert dta[0]._reso == reso
+ @pytest.mark.filterwarnings(
+ "ignore:weekofyear and week have been deprecated:FutureWarning"
+ )
+ @pytest.mark.parametrize(
+ "field", DatetimeArray._field_ops + DatetimeArray._bool_ops
+ )
+ def test_fields(self, unit, reso, field):
+ dti = pd.date_range("2016-01-01", periods=55, freq="D")
+ arr = np.asarray(dti).astype(f"M8[{unit}]")
+
+ dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
+
+ res = getattr(dta, field)
+ expected = getattr(dti._data, field)
+ tm.assert_numpy_array_equal(res, expected)
+
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47044 | 2022-05-17T22:13:21Z | 2022-05-18T01:01:08Z | 2022-05-18T01:01:08Z | 2022-05-18T01:05:33Z |
Backport PR #46394 on branch 1.4.x (CI: Use conda-forge PyPy) | diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index b86dcea59edb8..35c40f2a4aa54 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -155,24 +155,11 @@ jobs:
channel-priority: flexible
environment-file: ${{ env.ENV_FILE }}
use-only-tar-bz2: true
- if: ${{ env.IS_PYPY == 'false' }} # No pypy3.8 support
- name: Upgrade Arrow version
run: conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=${{ matrix.pyarrow_version }}
if: ${{ matrix.pyarrow_version }}
- - name: Setup PyPy
- uses: actions/setup-python@v3
- with:
- python-version: "pypy-3.8"
- if: ${{ env.IS_PYPY == 'true' }}
-
- - name: Setup PyPy dependencies
- run: |
- # TODO: re-enable cov, its slowing the tests down though
- pip install Cython numpy python-dateutil pytz pytest>=6.0 pytest-xdist>=1.31.0 pytest-asyncio>=0.17 hypothesis>=5.5.3
- if: ${{ env.IS_PYPY == 'true' }}
-
- name: Build Pandas
uses: ./.github/actions/build_pandas
diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-38.yaml
index ad05d2ab2dacc..eda35ee14ec65 100644
--- a/ci/deps/actions-pypy-38.yaml
+++ b/ci/deps/actions-pypy-38.yaml
@@ -11,6 +11,7 @@ dependencies:
- cython>=0.29.24
- pytest>=6.0
- pytest-cov
+ - pytest-asyncio
- pytest-xdist>=1.31
- hypothesis>=5.5.3
| Backport PR #46394: CI: Use conda-forge PyPy | https://api.github.com/repos/pandas-dev/pandas/pulls/47040 | 2022-05-17T09:17:38Z | 2022-05-17T11:06:54Z | 2022-05-17T11:06:54Z | 2022-05-17T11:06:54Z |
REF: share parts of DTI and PI | diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 25b7a5c3d3689..811dc72e9b908 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -210,8 +210,12 @@ def _summary(self, name=None) -> str:
# --------------------------------------------------------------------
# Indexing Methods
+ @final
def _can_partial_date_slice(self, reso: Resolution) -> bool:
- raise NotImplementedError
+ # e.g. test_getitem_setitem_periodindex
+ # History of conversation GH#3452, GH#3931, GH#2369, GH#14826
+ return reso > self._resolution_obj
+ # NB: for DTI/PI, not TDI
def _parsed_string_to_bounds(self, reso: Resolution, parsed):
raise NotImplementedError
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 3954cb28c2aca..5274f68eb3171 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -593,10 +593,6 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
end = self._maybe_cast_for_get_loc(end)
return start, end
- def _can_partial_date_slice(self, reso: Resolution) -> bool:
- # History of conversation GH#3452, GH#3931, GH#2369, GH#14826
- return reso > self._resolution_obj
-
def _deprecate_mismatched_indexing(self, key) -> None:
# GH#36148
# we get here with isinstance(key, self._data._recognized_scalars)
@@ -651,12 +647,8 @@ def get_loc(self, key, method=None, tolerance=None):
except KeyError as err:
if method is None:
raise KeyError(key) from err
- try:
- key = self._maybe_cast_for_get_loc(key)
- except ValueError as err:
- # FIXME(dateutil#1180): we get here because parse_with_reso
- # doesn't raise on "t2m"
- raise KeyError(key) from err
+
+ key = self._maybe_cast_for_get_loc(key)
elif isinstance(key, timedelta):
# GH#20464
@@ -682,7 +674,16 @@ def get_loc(self, key, method=None, tolerance=None):
def _maybe_cast_for_get_loc(self, key) -> Timestamp:
# needed to localize naive datetimes or dates (GH 35690)
- key = Timestamp(key)
+ try:
+ key = Timestamp(key)
+ except ValueError as err:
+ # FIXME(dateutil#1180): we get here because parse_with_reso
+ # doesn't raise on "t2m"
+ if not isinstance(key, str):
+ # Not expected to be reached, but check to be sure
+ raise # pragma: no cover
+ raise KeyError(key) from err
+
if key.tzinfo is None:
key = key.tz_localize(self.tz)
else:
@@ -691,6 +692,13 @@ def _maybe_cast_for_get_loc(self, key) -> Timestamp:
@doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
+
+ # GH#42855 handle date here instead of get_slice_bound
+ if isinstance(label, date) and not isinstance(label, datetime):
+ # Pandas supports slicing with dates, treated as datetimes at midnight.
+ # https://github.com/pandas-dev/pandas/issues/31501
+ label = Timestamp(label).to_pydatetime()
+
label = super()._maybe_cast_slice_bound(label, side, kind=kind)
self._deprecate_mismatched_indexing(label)
return self._maybe_cast_for_get_loc(label)
@@ -722,13 +730,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default):
if isinstance(start, time) or isinstance(end, time):
raise KeyError("Cannot mix time and non-time slice keys")
- # Pandas supports slicing with dates, treated as datetimes at midnight.
- # https://github.com/pandas-dev/pandas/issues/31501
- if isinstance(start, date) and not isinstance(start, datetime):
- start = datetime.combine(start, time(0, 0))
- if isinstance(end, date) and not isinstance(end, datetime):
- end = datetime.combine(end, time(0, 0))
-
def check_str_or_none(point):
return point is not None and not isinstance(point, str)
@@ -768,15 +769,6 @@ def check_str_or_none(point):
else:
return indexer
- @doc(Index.get_slice_bound)
- def get_slice_bound(
- self, label, side: Literal["left", "right"], kind=lib.no_default
- ) -> int:
- # GH#42855 handle date here instead of _maybe_cast_slice_bound
- if isinstance(label, date) and not isinstance(label, datetime):
- label = Timestamp(label).to_pydatetime()
- return super().get_slice_bound(label, side=side, kind=kind)
-
# --------------------------------------------------------------------
@property
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 592e6e9fb703d..e3ab5e8624585 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -25,7 +25,10 @@
DtypeObj,
npt,
)
-from pandas.util._decorators import doc
+from pandas.util._decorators import (
+ cache_readonly,
+ doc,
+)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
@@ -159,6 +162,12 @@ class PeriodIndex(DatetimeIndexOpsMixin):
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
+ @cache_readonly
+ # Signature of "_resolution_obj" incompatible with supertype "DatetimeIndexOpsMixin"
+ def _resolution_obj(self) -> Resolution: # type: ignore[override]
+ # for compat with DatetimeIndex
+ return self.dtype._resolution_obj
+
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@@ -446,10 +455,10 @@ def get_loc(self, key, method=None, tolerance=None):
# TODO: pass if method is not None, like DTI does?
raise KeyError(key) from err
- if reso == self.dtype._resolution_obj:
- # the reso < self.dtype._resolution_obj case goes
+ if reso == self._resolution_obj:
+ # the reso < self._resolution_obj case goes
# through _get_string_slice
- key = Period(parsed, freq=self.freq)
+ key = self._cast_partial_indexing_scalar(key)
loc = self.get_loc(key, method=method, tolerance=tolerance)
# Recursing instead of falling through matters for the exception
# message in test_get_loc3 (though not clear if that really matters)
@@ -457,28 +466,14 @@ def get_loc(self, key, method=None, tolerance=None):
elif method is None:
raise KeyError(key)
else:
- key = Period(parsed, freq=self.freq)
+ key = self._cast_partial_indexing_scalar(parsed)
elif isinstance(key, Period):
- sfreq = self.freq
- kfreq = key.freq
- if not (
- sfreq.n == kfreq.n
- # error: "BaseOffset" has no attribute "_period_dtype_code"
- and sfreq._period_dtype_code # type: ignore[attr-defined]
- # error: "BaseOffset" has no attribute "_period_dtype_code"
- == kfreq._period_dtype_code # type: ignore[attr-defined]
- ):
- # GH#42247 For the subset of DateOffsets that can be Period freqs,
- # checking these two attributes is sufficient to check equality,
- # and much more performant than `self.freq == key.freq`
- raise KeyError(key)
+ key = self._maybe_cast_for_get_loc(key)
+
elif isinstance(key, datetime):
- try:
- key = Period(key, freq=self.freq)
- except ValueError as err:
- # we cannot construct the Period
- raise KeyError(orig_key) from err
+ key = self._cast_partial_indexing_scalar(key)
+
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
@@ -488,10 +483,35 @@ def get_loc(self, key, method=None, tolerance=None):
except KeyError as err:
raise KeyError(orig_key) from err
+ def _maybe_cast_for_get_loc(self, key: Period) -> Period:
+ # name is a misnomer, chosen for compat with DatetimeIndex
+ sfreq = self.freq
+ kfreq = key.freq
+ if not (
+ sfreq.n == kfreq.n
+ # error: "BaseOffset" has no attribute "_period_dtype_code"
+ and sfreq._period_dtype_code # type: ignore[attr-defined]
+ # error: "BaseOffset" has no attribute "_period_dtype_code"
+ == kfreq._period_dtype_code # type: ignore[attr-defined]
+ ):
+ # GH#42247 For the subset of DateOffsets that can be Period freqs,
+ # checking these two attributes is sufficient to check equality,
+ # and much more performant than `self.freq == key.freq`
+ raise KeyError(key)
+ return key
+
+ def _cast_partial_indexing_scalar(self, label):
+ try:
+ key = Period(label, freq=self.freq)
+ except ValueError as err:
+ # we cannot construct the Period
+ raise KeyError(label) from err
+ return key
+
@doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
if isinstance(label, datetime):
- label = Period(label, freq=self.freq)
+ label = self._cast_partial_indexing_scalar(label)
return super()._maybe_cast_slice_bound(label, side, kind=kind)
@@ -499,11 +519,6 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
iv = Period(parsed, freq=reso.attr_abbrev)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
- def _can_partial_date_slice(self, reso: Resolution) -> bool:
- assert isinstance(reso, Resolution), (type(reso), reso)
- # e.g. test_getitem_setitem_periodindex
- return reso > self.dtype._resolution_obj
-
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47038 | 2022-05-17T03:13:56Z | 2022-05-17T12:32:02Z | 2022-05-17T12:32:02Z | 2022-05-17T14:37:52Z |
TYP: pandas/_testing | diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py
index 9e89e09e418b3..1a8fe71ae3728 100644
--- a/pandas/_testing/_warnings.py
+++ b/pandas/_testing/_warnings.py
@@ -7,6 +7,7 @@
import re
import sys
from typing import (
+ Literal,
Sequence,
Type,
cast,
@@ -17,7 +18,9 @@
@contextmanager
def assert_produces_warning(
expected_warning: type[Warning] | bool | None = Warning,
- filter_level="always",
+ filter_level: Literal[
+ "error", "ignore", "always", "default", "module", "once"
+ ] = "always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: str | None = None,
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index ab42fcd92a3d9..b5e288690decb 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -1,6 +1,9 @@
from __future__ import annotations
-from typing import cast
+from typing import (
+ Literal,
+ cast,
+)
import warnings
import numpy as np
@@ -10,6 +13,7 @@
no_default,
)
from pandas._libs.missing import is_matching_na
+from pandas._libs.sparse import SparseIndex
import pandas._libs.testing as _testing
from pandas.util._exceptions import find_stack_level
@@ -61,7 +65,7 @@
def assert_almost_equal(
left,
right,
- check_dtype: bool | str = "equiv",
+ check_dtype: bool | Literal["equiv"] = "equiv",
check_less_precise: bool | int | NoDefault = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
@@ -169,9 +173,8 @@ def assert_almost_equal(
assert_class_equal(left, right, obj=obj)
# if we have "equiv", this becomes True
- check_dtype = bool(check_dtype)
_testing.assert_almost_equal(
- left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
+ left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs
)
@@ -686,7 +689,7 @@ def assert_numpy_array_equal(
left,
right,
strict_nan=False,
- check_dtype=True,
+ check_dtype: bool | Literal["equiv"] = True,
err_msg=None,
check_same=None,
obj="numpy array",
@@ -765,7 +768,7 @@ def _raise(left, right, err_msg):
def assert_extension_array_equal(
left,
right,
- check_dtype=True,
+ check_dtype: bool | Literal["equiv"] = True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
@@ -858,7 +861,7 @@ def assert_extension_array_equal(
_testing.assert_almost_equal(
left_valid,
right_valid,
- check_dtype=check_dtype,
+ check_dtype=bool(check_dtype),
rtol=rtol,
atol=atol,
obj="ExtensionArray",
@@ -870,7 +873,7 @@ def assert_extension_array_equal(
def assert_series_equal(
left,
right,
- check_dtype=True,
+ check_dtype: bool | Literal["equiv"] = True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
@@ -1064,7 +1067,7 @@ def assert_series_equal(
right._values,
rtol=rtol,
atol=atol,
- check_dtype=check_dtype,
+ check_dtype=bool(check_dtype),
obj=str(obj),
index_values=np.asarray(left.index),
)
@@ -1100,7 +1103,7 @@ def assert_series_equal(
right._values,
rtol=rtol,
atol=atol,
- check_dtype=check_dtype,
+ check_dtype=bool(check_dtype),
obj=str(obj),
index_values=np.asarray(left.index),
)
@@ -1125,7 +1128,7 @@ def assert_series_equal(
def assert_frame_equal(
left,
right,
- check_dtype=True,
+ check_dtype: bool | Literal["equiv"] = True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
@@ -1403,8 +1406,8 @@ def assert_sp_array_equal(left, right):
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
- assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
- assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
+ assert isinstance(left.sp_index, SparseIndex)
+ assert isinstance(right.sp_index, SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index d7eba6b8319fb..bbcf984e68b4b 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -35,6 +35,7 @@ def test_foo():
from pandas._config import get_option
+from pandas._typing import F
from pandas.compat import (
IS64,
is_platform_windows,
@@ -216,7 +217,7 @@ def skip_if_np_lt(ver_str: str, *args, reason: str | None = None):
)
-def parametrize_fixture_doc(*args):
+def parametrize_fixture_doc(*args) -> Callable[[F], F]:
"""
Intended for use as a decorator for parametrized fixture,
this function will wrap the decorated function with a pytest
| Toward's pyrights reportGeneralTypeIssues. After this almost all exceptions for reportGeneralTypeIssues are in pandas/{core, io, plotting}. | https://api.github.com/repos/pandas-dev/pandas/pulls/47037 | 2022-05-17T01:36:09Z | 2022-06-05T22:49:13Z | 2022-06-05T22:49:13Z | 2022-06-08T19:26:35Z |
REF: write indexing checks in terms of should_fallback_to_positional | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 8ebaaa28e13a5..c4a3afbb282cf 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4205,9 +4205,14 @@ def is_int(v):
return v is None or is_integer(v)
is_index_slice = is_int(start) and is_int(stop) and is_int(step)
- is_positional = is_index_slice and not (
- self.is_integer() or self.is_categorical()
+
+ # special case for interval_dtype bc we do not do partial-indexing
+ # on integer Intervals when slicing
+ # TODO: write this in terms of e.g. should_partial_index?
+ ints_are_positional = self._should_fallback_to_positional or is_interval_dtype(
+ self.dtype
)
+ is_positional = is_index_slice and ints_are_positional
if kind == "getitem":
"""
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 174e0a7f81850..c1cb5ad315298 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -222,6 +222,8 @@ def _convert_slice_indexer(self, key: slice, kind: str, is_frame: bool = False):
if is_float_dtype(self.dtype):
assert kind in ["loc", "getitem"]
+ # TODO: can we write this as a condition based on
+ # e.g. _should_fallback_to_positional?
# We always treat __getitem__ slicing as label-based
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index ddae58fd46bb0..02c095202d079 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1322,10 +1322,6 @@ def _convert_to_indexer(self, key, axis: int):
if isinstance(key, slice):
return labels._convert_slice_indexer(key, kind="loc")
- # see if we are positional in nature
- is_int_index = labels.is_integer()
- is_int_positional = is_integer(key) and not is_int_index
-
if (
isinstance(key, tuple)
and not isinstance(labels, MultiIndex)
@@ -1350,17 +1346,9 @@ def _convert_to_indexer(self, key, axis: int):
if not isinstance(labels, MultiIndex):
raise
except ValueError:
- if not is_int_positional:
+ if not is_integer(key):
raise
-
- # a positional
- if is_int_positional:
-
- # if we are setting and its not a valid location
- # its an insert which fails by definition
-
- # always valid
- return {"key": key}
+ return {"key": key}
if is_nested_tuple(key, labels):
if self.ndim == 1 and any(isinstance(k, tuple) for k in key):
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47036 | 2022-05-16T20:05:15Z | 2022-05-17T12:32:44Z | 2022-05-17T12:32:44Z | 2022-05-17T14:37:27Z |
Stacklevel argument updated #46687 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 589ea6e67d926..5d2331298fc98 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10973,7 +10973,6 @@ def _add_numeric_operations(cls):
@deprecate_nonkeyword_arguments(
version=None,
allowed_args=["self"],
- stacklevel=find_stack_level() - 1,
name="DataFrame.any and Series.any",
)
@doc(
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index fbea7a71202eb..b6261b05f6cd7 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -318,9 +318,7 @@ def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
decompression_options=_shared_docs["decompression_options"] % "path_or_buf",
)
@deprecate_kwarg(old_arg_name="numpy", new_arg_name=None)
-@deprecate_nonkeyword_arguments(
- version="2.0", allowed_args=["path_or_buf"], stacklevel=3
-)
+@deprecate_nonkeyword_arguments(version="2.0", allowed_args=["path_or_buf"])
def read_json(
path_or_buf=None,
orient=None,
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index e811ace78f1f5..763bae946a1b4 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -1166,9 +1166,7 @@ def read_table(
...
-@deprecate_nonkeyword_arguments(
- version=None, allowed_args=["filepath_or_buffer"], stacklevel=3
-)
+@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
@@ -1265,9 +1263,7 @@ def read_table(
return _read(filepath_or_buffer, kwds)
-@deprecate_nonkeyword_arguments(
- version=None, allowed_args=["filepath_or_buffer"], stacklevel=2
-)
+@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
def read_fwf(
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
colspecs: Sequence[tuple[int, int]] | str | None = "infer",
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 052e674d1a488..57014ef9c9622 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -78,9 +78,7 @@ def read_sas(
...
-@deprecate_nonkeyword_arguments(
- version=None, allowed_args=["filepath_or_buffer"], stacklevel=2
-)
+@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
@doc(decompression_options=_shared_docs["decompression_options"])
def read_sas(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 78fbeaad09300..57bb72a951853 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -974,9 +974,7 @@ def _parse(
)
-@deprecate_nonkeyword_arguments(
- version=None, allowed_args=["path_or_buffer"], stacklevel=2
-)
+@deprecate_nonkeyword_arguments(version=None, allowed_args=["path_or_buffer"])
@doc(
storage_options=_shared_docs["storage_options"],
decompression_options=_shared_docs["decompression_options"] % "path_or_buffer",
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
index 7f60fd60c91b0..a0da3a7eaadce 100644
--- a/pandas/tests/io/parser/common/test_common_basic.py
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -920,5 +920,4 @@ def test_read_table_posargs_deprecation(all_parsers):
"In a future version of pandas all arguments of read_table "
"except for the argument 'filepath_or_buffer' will be keyword-only"
)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- parser.read_table(data, " ")
+ parser.read_table_check_warnings(FutureWarning, msg, data, " ")
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index 066f448d97505..0462d1fe6da0b 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -42,6 +42,16 @@ def read_table(self, *args, **kwargs):
kwargs = self.update_kwargs(kwargs)
return read_table(*args, **kwargs)
+ def read_table_check_warnings(
+ self, warn_type: type[Warning], warn_msg: str, *args, **kwargs
+ ):
+ # We need to check the stacklevel here instead of in the tests
+ # since this is where read_table is called and where the warning
+ # should point to.
+ kwargs = self.update_kwargs(kwargs)
+ with tm.assert_produces_warning(warn_type, match=warn_msg):
+ return read_table(*args, **kwargs)
+
class CParser(BaseParser):
engine = "c"
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 0f15511e491cc..978f2982e6d18 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -13,6 +13,7 @@
from pandas._libs.properties import cache_readonly # noqa:F401
from pandas._typing import F
+from pandas.util._exceptions import find_stack_level
def deprecate(
@@ -260,7 +261,6 @@ def future_version_msg(version: str | None) -> str:
def deprecate_nonkeyword_arguments(
version: str | None,
allowed_args: list[str] | None = None,
- stacklevel: int = 2,
name: str | None = None,
) -> Callable[[F], F]:
"""
@@ -280,9 +280,6 @@ def deprecate_nonkeyword_arguments(
defaults to list of all arguments not having the
default value.
- stacklevel : int, default=2
- The stack level for warnings.warn
-
name : str, optional
The specific name of the function to show in the warning
message. If None, then the Qualified name of the function
@@ -312,7 +309,7 @@ def wrapper(*args, **kwargs):
warnings.warn(
msg.format(arguments=arguments),
FutureWarning,
- stacklevel=stacklevel,
+ stacklevel=find_stack_level(),
)
return func(*args, **kwargs)
| - [x] closes #46687
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47035 | 2022-05-16T19:58:59Z | 2022-06-30T18:06:56Z | 2022-06-30T18:06:56Z | 2022-06-30T18:07:06Z |
ENH: Incorproate ArrowDtype into ArrowExtensionArray | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 53e003e2ed7dd..fbf1cea670c5c 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -26,6 +26,7 @@
)
from pandas._typing import Dtype
+from pandas.compat import pa_version_under1p01
from pandas.core.dtypes.common import (
is_float_dtype,
@@ -193,6 +194,45 @@
]
]
+if not pa_version_under1p01:
+ import pyarrow as pa
+
+ UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
+ SIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.int16(), pa.int32(), pa.uint64()]
+ ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES
+
+ FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()]
+ STRING_PYARROW_DTYPES = [pa.string(), pa.utf8()]
+
+ TIME_PYARROW_DTYPES = [
+ pa.time32("s"),
+ pa.time32("ms"),
+ pa.time64("us"),
+ pa.time64("ns"),
+ ]
+ DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()]
+ DATETIME_PYARROW_DTYPES = [
+ pa.timestamp(unit=unit, tz=tz)
+ for unit in ["s", "ms", "us", "ns"]
+ for tz in [None, "UTC", "US/Pacific", "US/Eastern"]
+ ]
+ TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]]
+
+ BOOL_PYARROW_DTYPES = [pa.bool_()]
+
+ # TODO: Add container like pyarrow types:
+ # https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions
+ ALL_PYARROW_DTYPES = (
+ ALL_INT_PYARROW_DTYPES
+ + FLOAT_PYARROW_DTYPES
+ + TIME_PYARROW_DTYPES
+ + DATE_PYARROW_DTYPES
+ + DATETIME_PYARROW_DTYPES
+ + TIMEDELTA_PYARROW_DTYPES
+ + BOOL_PYARROW_DTYPES
+ )
+
+
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index fdd505e259dd9..66bb12db277fc 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -9,6 +9,8 @@
import numpy as np
from pandas._typing import (
+ Dtype,
+ PositionalIndexer,
TakeIndexer,
npt,
)
@@ -24,6 +26,7 @@
is_array_like,
is_bool_dtype,
is_integer,
+ is_integer_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna
@@ -31,6 +34,7 @@
from pandas.core.arrays.base import ExtensionArray
from pandas.core.indexers import (
check_array_indexer,
+ unpack_tuple_and_ellipses,
validate_indices,
)
@@ -39,6 +43,7 @@
import pyarrow.compute as pc
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
+ from pandas.core.arrays.arrow.dtype import ArrowDtype
if TYPE_CHECKING:
from pandas import Series
@@ -48,16 +53,130 @@
class ArrowExtensionArray(ExtensionArray):
"""
- Base class for ExtensionArray backed by Arrow array.
+ Base class for ExtensionArray backed by Arrow ChunkedArray.
"""
_data: pa.ChunkedArray
- def __init__(self, values: pa.ChunkedArray) -> None:
- self._data = values
+ def __init__(self, values: pa.Array | pa.ChunkedArray) -> None:
+ if pa_version_under1p01:
+ msg = "pyarrow>=1.0.0 is required for PyArrow backed ArrowExtensionArray."
+ raise ImportError(msg)
+ if isinstance(values, pa.Array):
+ self._data = pa.chunked_array([values])
+ elif isinstance(values, pa.ChunkedArray):
+ self._data = values
+ else:
+ raise ValueError(
+ f"Unsupported type '{type(values)}' for ArrowExtensionArray"
+ )
+ self._dtype = ArrowDtype(self._data.type)
+
+ @classmethod
+ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):
+ """
+ Construct a new ExtensionArray from a sequence of scalars.
+ """
+ if isinstance(dtype, ArrowDtype):
+ pa_dtype = dtype.pyarrow_dtype
+ elif dtype:
+ pa_dtype = pa.from_numpy_dtype(dtype)
+ else:
+ pa_dtype = None
+
+ if isinstance(scalars, cls):
+ data = scalars._data
+ if pa_dtype:
+ data = data.cast(pa_dtype)
+ return cls(data)
+ else:
+ return cls(
+ pa.chunked_array(pa.array(scalars, type=pa_dtype, from_pandas=True))
+ )
+
+ @classmethod
+ def _from_sequence_of_strings(
+ cls, strings, *, dtype: Dtype | None = None, copy=False
+ ):
+ """
+ Construct a new ExtensionArray from a sequence of strings.
+ """
+ return cls._from_sequence(strings, dtype=dtype, copy=copy)
+
+ def __getitem__(self, item: PositionalIndexer):
+ """Select a subset of self.
+
+ Parameters
+ ----------
+ item : int, slice, or ndarray
+ * int: The position in 'self' to get.
+ * slice: A slice object, where 'start', 'stop', and 'step' are
+ integers or None
+ * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
+
+ Returns
+ -------
+ item : scalar or ExtensionArray
+
+ Notes
+ -----
+ For scalar ``item``, return a scalar value suitable for the array's
+ type. This should be an instance of ``self.dtype.type``.
+ For slice ``key``, return an instance of ``ExtensionArray``, even
+ if the slice is length 0 or 1.
+ For a boolean mask, return an instance of ``ExtensionArray``, filtered
+ to the values where ``item`` is True.
+ """
+ item = check_array_indexer(self, item)
+
+ if isinstance(item, np.ndarray):
+ if not len(item):
+ # Removable once we migrate StringDtype[pyarrow] to ArrowDtype[string]
+ if self._dtype.name == "string" and self._dtype.storage == "pyarrow":
+ pa_dtype = pa.string()
+ else:
+ pa_dtype = self._dtype.pyarrow_dtype
+ return type(self)(pa.chunked_array([], type=pa_dtype))
+ elif is_integer_dtype(item.dtype):
+ return self.take(item)
+ elif is_bool_dtype(item.dtype):
+ return type(self)(self._data.filter(item))
+ else:
+ raise IndexError(
+ "Only integers, slices and integer or "
+ "boolean arrays are valid indices."
+ )
+ elif isinstance(item, tuple):
+ item = unpack_tuple_and_ellipses(item)
+
+ # error: Non-overlapping identity check (left operand type:
+ # "Union[Union[int, integer[Any]], Union[slice, List[int],
+ # ndarray[Any, Any]]]", right operand type: "ellipsis")
+ if item is Ellipsis: # type: ignore[comparison-overlap]
+ # TODO: should be handled by pyarrow?
+ item = slice(None)
+
+ if is_scalar(item) and not is_integer(item):
+ # e.g. "foo" or 2.5
+ # exception message copied from numpy
+ raise IndexError(
+ r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
+ r"(`None`) and integer or boolean arrays are valid indices"
+ )
+ # We are not an array indexer, so maybe e.g. a slice or integer
+ # indexer. We dispatch to pyarrow.
+ value = self._data[item]
+ if isinstance(value, pa.ChunkedArray):
+ return type(self)(value)
+ else:
+ scalar = value.as_py()
+ if scalar is None:
+ return self._dtype.na_value
+ else:
+ return scalar
def __arrow_array__(self, type=None):
- """Convert myself to a pyarrow Array or ChunkedArray."""
+ """Convert myself to a pyarrow ChunkedArray."""
return self._data
def equals(self, other) -> bool:
@@ -67,6 +186,13 @@ def equals(self, other) -> bool:
# TODO: is this documented somewhere?
return self._data == other._data
+ @property
+ def dtype(self) -> ArrowDtype:
+ """
+ An instance of 'ExtensionDtype'.
+ """
+ return self._dtype
+
@property
def nbytes(self) -> int:
"""
@@ -377,7 +503,8 @@ def _indexing_key_to_indices(
def _maybe_convert_setitem_value(self, value):
"""Maybe convert value to be pyarrow compatible."""
- raise NotImplementedError()
+ # TODO: Make more robust like ArrowStringArray._maybe_convert_setitem_value
+ return value
def _set_via_chunk_iteration(
self, indices: npt.NDArray[np.intp], value: npt.NDArray[Any]
diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index c0ecb0856f27f..6c932f3b94e53 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -1,35 +1,60 @@
from __future__ import annotations
+import re
+
import numpy as np
import pyarrow as pa
+from pandas._libs import missing as libmissing
from pandas._typing import DtypeObj
from pandas.util._decorators import cache_readonly
-from pandas.core.dtypes.base import StorageExtensionDtype
-
-from pandas.core.arrays.arrow import ArrowExtensionArray
+from pandas.core.dtypes.base import (
+ StorageExtensionDtype,
+ register_extension_dtype,
+)
+@register_extension_dtype
class ArrowDtype(StorageExtensionDtype):
"""
- Base class for dtypes for BaseArrowArray subclasses.
+ Base class for dtypes for ArrowExtensionArray.
Modeled after BaseMaskedDtype
"""
- name: str
- base = None
- type: pa.DataType
+ na_value = libmissing.NA
+ _metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment]
- na_value = pa.NA
+ def __init__(self, pyarrow_dtype: pa.DataType) -> None:
+ super().__init__("pyarrow")
+ if not isinstance(pyarrow_dtype, pa.DataType):
+ raise ValueError(
+ f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
+ f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead."
+ )
+ self.pyarrow_dtype = pyarrow_dtype
- def __init__(self, storage="pyarrow") -> None:
- super().__init__(storage)
+ @property
+ def type(self):
+ """
+ Returns pyarrow.DataType.
+ """
+ return type(self.pyarrow_dtype)
+
+ @property
+ def name(self) -> str: # type: ignore[override]
+ """
+ A string identifying the data type.
+ """
+ return str(self.pyarrow_dtype)
@cache_readonly
def numpy_dtype(self) -> np.dtype:
"""Return an instance of the related numpy dtype"""
- return self.type.to_pandas_dtype()
+ try:
+ return np.dtype(self.pyarrow_dtype.to_pandas_dtype())
+ except (NotImplementedError, TypeError):
+ return np.dtype(object)
@cache_readonly
def kind(self) -> str:
@@ -49,6 +74,8 @@ def construct_array_type(cls):
-------
type
"""
+ from pandas.core.arrays.arrow import ArrowExtensionArray
+
return ArrowExtensionArray
@classmethod
@@ -59,29 +86,52 @@ def construct_from_string(cls, string: str):
Parameters
----------
string : str
+ string should follow the format f"{pyarrow_type}[pyarrow]"
+ e.g. int64[pyarrow]
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
- if string == f"{cls.name}[pyarrow]":
- return cls(storage="pyarrow")
- raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
+ if not string.endswith("[pyarrow]"):
+ raise TypeError(f"string {string} must end with '[pyarrow]'")
+ base_type = string.split("[pyarrow]")[0]
+ pa_dtype = getattr(pa, base_type, None)
+ if pa_dtype is None:
+ has_parameters = re.search(r"\[.*\]", base_type)
+ if has_parameters:
+ raise NotImplementedError(
+ "Passing pyarrow type specific parameters "
+ f"({has_parameters.group()}) in the string is not supported. "
+ "Please construct an ArrowDtype object with a pyarrow_dtype "
+ "instance with specific parameters."
+ )
+ raise TypeError(f"'{base_type}' is not a valid pyarrow data type.")
+ return cls(pa_dtype())
+
+ @property
+ def _is_numeric(self) -> bool:
+ """
+ Whether columns with this dtype should be considered numeric.
+ """
+ # TODO: pa.types.is_boolean?
+ return (
+ pa.types.is_integer(self.pyarrow_dtype)
+ or pa.types.is_floating(self.pyarrow_dtype)
+ or pa.types.is_decimal(self.pyarrow_dtype)
+ )
- @classmethod
- def from_numpy_dtype(cls, dtype: np.dtype) -> ArrowDtype:
+ @property
+ def _is_boolean(self) -> bool:
"""
- Construct the ArrowDtype corresponding to the given numpy dtype.
+ Whether this dtype should be considered boolean.
"""
- # TODO: This may be incomplete
- pa_dtype = pa.from_numpy_dtype(dtype)
- if pa_dtype is cls.type:
- return cls()
- raise NotImplementedError(dtype)
+ return pa.types.is_boolean(self.pyarrow_dtype)
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
# We unwrap any masked dtypes, find the common dtype we would use
# for that, then re-mask the result.
+ # Mirrors BaseMaskedDtype
from pandas.core.dtypes.cast import find_common_type
new_dtype = find_common_type(
@@ -91,11 +141,11 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
]
)
if not isinstance(new_dtype, np.dtype):
- # If we ever support e.g. Masked[DatetimeArray] then this will change
return None
try:
- return type(self).from_numpy_dtype(new_dtype)
- except (KeyError, NotImplementedError):
+ pa_dtype = pa.from_numpy_dtype(new_dtype)
+ return type(self)(pa_dtype)
+ except NotImplementedError:
return None
def __from_arrow__(self, array: pa.Array | pa.ChunkedArray):
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 21b5dc625956e..45683d83a1303 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -64,9 +64,6 @@ class StringDtype(StorageExtensionDtype):
StringDtype is considered experimental. The implementation and
parts of the API may change without warning.
- In particular, StringDtype.na_value may change to no longer be
- ``pd.NA``.
-
Parameters
----------
storage : {"python", "pyarrow"}, optional
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 8b6f1ffcfa59b..a07f748fa0c8c 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -2,10 +2,7 @@
from collections.abc import Callable # noqa: PDF001
import re
-from typing import (
- Union,
- overload,
-)
+from typing import Union
import numpy as np
@@ -16,10 +13,7 @@
from pandas._typing import (
Dtype,
NpDtype,
- PositionalIndexer,
Scalar,
- ScalarIndexer,
- SequenceIndexer,
npt,
)
from pandas.compat import (
@@ -32,7 +26,6 @@
from pandas.core.dtypes.common import (
is_bool_dtype,
is_dtype_equal,
- is_integer,
is_integer_dtype,
is_object_dtype,
is_scalar,
@@ -50,10 +43,6 @@
BaseStringArray,
StringDtype,
)
-from pandas.core.indexers import (
- check_array_indexer,
- unpack_tuple_and_ellipses,
-)
from pandas.core.strings.object_array import ObjectStringArrayMixin
if not pa_version_under1p01:
@@ -76,7 +65,7 @@
def _chk_pyarrow_available() -> None:
if pa_version_under1p01:
- msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray."
+ msg = "pyarrow>=1.0.0 is required for PyArrow backed ArrowExtensionArray."
raise ImportError(msg)
@@ -132,13 +121,9 @@ class ArrowStringArray(
"""
def __init__(self, values) -> None:
+ super().__init__(values)
+ # TODO: Migrate to ArrowDtype instead
self._dtype = StringDtype(storage="pyarrow")
- if isinstance(values, pa.Array):
- self._data = pa.chunked_array([values])
- elif isinstance(values, pa.ChunkedArray):
- self._data = values
- else:
- raise ValueError(f"Unsupported type '{type(values)}' for ArrowStringArray")
if not pa.types.is_string(self._data.type):
raise ValueError(
@@ -174,7 +159,7 @@ def _from_sequence_of_strings(
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@property
- def dtype(self) -> StringDtype:
+ def dtype(self) -> StringDtype: # type: ignore[override]
"""
An instance of 'string[pyarrow]'.
"""
@@ -205,86 +190,6 @@ def to_numpy(
result[mask] = na_value
return result
- @overload
- def __getitem__(self, item: ScalarIndexer) -> ArrowStringScalarOrNAT:
- ...
-
- @overload
- def __getitem__(self: ArrowStringArray, item: SequenceIndexer) -> ArrowStringArray:
- ...
-
- def __getitem__(
- self: ArrowStringArray, item: PositionalIndexer
- ) -> ArrowStringArray | ArrowStringScalarOrNAT:
- """Select a subset of self.
-
- Parameters
- ----------
- item : int, slice, or ndarray
- * int: The position in 'self' to get.
- * slice: A slice object, where 'start', 'stop', and 'step' are
- integers or None
- * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
-
- Returns
- -------
- item : scalar or ExtensionArray
-
- Notes
- -----
- For scalar ``item``, return a scalar value suitable for the array's
- type. This should be an instance of ``self.dtype.type``.
- For slice ``key``, return an instance of ``ExtensionArray``, even
- if the slice is length 0 or 1.
- For a boolean mask, return an instance of ``ExtensionArray``, filtered
- to the values where ``item`` is True.
- """
- item = check_array_indexer(self, item)
-
- if isinstance(item, np.ndarray):
- if not len(item):
- return type(self)(pa.chunked_array([], type=pa.string()))
- elif is_integer_dtype(item.dtype):
- return self.take(item)
- elif is_bool_dtype(item.dtype):
- return type(self)(self._data.filter(item))
- else:
- raise IndexError(
- "Only integers, slices and integer or "
- "boolean arrays are valid indices."
- )
- elif isinstance(item, tuple):
- item = unpack_tuple_and_ellipses(item)
-
- # error: Non-overlapping identity check (left operand type:
- # "Union[Union[int, integer[Any]], Union[slice, List[int],
- # ndarray[Any, Any]]]", right operand type: "ellipsis")
- if item is Ellipsis: # type: ignore[comparison-overlap]
- # TODO: should be handled by pyarrow?
- item = slice(None)
-
- if is_scalar(item) and not is_integer(item):
- # e.g. "foo" or 2.5
- # exception message copied from numpy
- raise IndexError(
- r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
- r"(`None`) and integer or boolean arrays are valid indices"
- )
- # We are not an array indexer, so maybe e.g. a slice or integer
- # indexer. We dispatch to pyarrow.
- value = self._data[item]
- if isinstance(value, pa.ChunkedArray):
- return type(self)(value)
- else:
- return self._as_pandas_scalar(value)
-
- def _as_pandas_scalar(self, arrow_scalar: pa.Scalar):
- scalar = arrow_scalar.as_py()
- if scalar is None:
- return self._dtype.na_value
- else:
- return scalar
-
def _cmp_method(self, other, op):
from pandas.arrays import BooleanArray
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 9762b779477e4..cffac15ef6496 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -408,7 +408,7 @@ def __str__(self):
return self.name
def __eq__(self, other: Any) -> bool:
- if isinstance(other, self.type) and other == self.name:
+ if isinstance(other, str) and other == self.name:
return True
return super().__eq__(other)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index e9d48eb937b36..b563f84207b22 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -262,7 +262,7 @@ def test_constructor_raises(cls):
if cls is pd.arrays.StringArray:
msg = "StringArray requires a sequence of strings or pandas.NA"
else:
- msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray"
+ msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray"
with pytest.raises(ValueError, match=msg):
cls(np.array(["a", "b"], dtype="S1"))
diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index de1b7a9c603af..f43cf298857a0 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -59,7 +59,7 @@ def test_constructor_not_string_type_raises(array, chunked):
pytest.skip("chunked not applicable to numpy array")
arr = pa.chunked_array(arr)
if array is np:
- msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray"
+ msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray"
else:
msg = re.escape(
"ArrowStringArray requires a PyArrow (chunked) array of string type"
@@ -122,7 +122,7 @@ def test_from_sequence_wrong_dtype_raises():
reason="pyarrow is installed",
)
def test_pyarrow_not_installed_raises():
- msg = re.escape("pyarrow>=1.0.0 is required for PyArrow backed StringArray")
+ msg = re.escape("pyarrow>=1.0.0 is required for PyArrow backed")
with pytest.raises(ImportError, match=msg):
StringDtype(storage="pyarrow")
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index d19a6245809be..22595c4e461d7 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -185,7 +185,7 @@ def __init__(self, values) -> None:
assert values.type == pa.bool_()
self._data = values
- self._dtype = ArrowBoolDtype()
+ self._dtype = ArrowBoolDtype() # type: ignore[assignment]
class ArrowStringArray(ArrowExtensionArray):
@@ -195,4 +195,4 @@ def __init__(self, values) -> None:
assert values.type == pa.string()
self._data = values
- self._dtype = ArrowStringDtype()
+ self._dtype = ArrowStringDtype() # type: ignore[assignment]
diff --git a/pandas/tests/extension/arrow/test_timestamp.py b/pandas/tests/extension/arrow/test_timestamp.py
index b2750784ab3d6..5b81940e5a6c0 100644
--- a/pandas/tests/extension/arrow/test_timestamp.py
+++ b/pandas/tests/extension/arrow/test_timestamp.py
@@ -46,7 +46,7 @@ def __init__(self, values) -> None:
assert values.type == pa.timestamp("us")
self._data = values
- self._dtype = ArrowTimestampUSDtype()
+ self._dtype = ArrowTimestampUSDtype() # type: ignore[assignment]
def test_constructor_extensionblock():
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
new file mode 100644
index 0000000000000..4047c0db1fee4
--- /dev/null
+++ b/pandas/tests/extension/test_arrow.py
@@ -0,0 +1,184 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+"""
+
+from datetime import (
+ date,
+ datetime,
+ time,
+ timedelta,
+)
+
+import pytest
+
+from pandas.compat import (
+ pa_version_under2p0,
+ pa_version_under3p0,
+)
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.tests.extension import base
+
+pa = pytest.importorskip("pyarrow", minversion="1.0.1")
+
+from pandas.core.arrays.arrow.dtype import ArrowDtype # isort:skip
+
+
+@pytest.fixture(params=tm.ALL_PYARROW_DTYPES)
+def dtype(request):
+ return ArrowDtype(pyarrow_dtype=request.param)
+
+
+@pytest.fixture
+def data(dtype):
+ pa_dtype = dtype.pyarrow_dtype
+ if pa.types.is_boolean(pa_dtype):
+ data = [True, False] * 4 + [None] + [True, False] * 44 + [None] + [True, False]
+ elif pa.types.is_floating(pa_dtype):
+ data = [1.0, 0.0] * 4 + [None] + [-2.0, -1.0] * 44 + [None] + [0.5, 99.5]
+ elif pa.types.is_signed_integer(pa_dtype):
+ data = [1, 0] * 4 + [None] + [-2, -1] * 44 + [None] + [1, 99]
+ elif pa.types.is_unsigned_integer(pa_dtype):
+ data = [1, 0] * 4 + [None] + [2, 1] * 44 + [None] + [1, 99]
+ elif pa.types.is_date(pa_dtype):
+ data = (
+ [date(2022, 1, 1), date(1999, 12, 31)] * 4
+ + [None]
+ + [date(2022, 1, 1), date(2022, 1, 1)] * 44
+ + [None]
+ + [date(1999, 12, 31), date(1999, 12, 31)]
+ )
+ elif pa.types.is_timestamp(pa_dtype):
+ data = (
+ [datetime(2020, 1, 1, 1, 1, 1, 1), datetime(1999, 1, 1, 1, 1, 1, 1)] * 4
+ + [None]
+ + [datetime(2020, 1, 1, 1), datetime(1999, 1, 1, 1)] * 44
+ + [None]
+ + [datetime(2020, 1, 1), datetime(1999, 1, 1)]
+ )
+ elif pa.types.is_duration(pa_dtype):
+ data = (
+ [timedelta(1), timedelta(1, 1)] * 4
+ + [None]
+ + [timedelta(-1), timedelta(0)] * 44
+ + [None]
+ + [timedelta(-10), timedelta(10)]
+ )
+ elif pa.types.is_time(pa_dtype):
+ data = (
+ [time(12, 0), time(0, 12)] * 4
+ + [None]
+ + [time(0, 0), time(1, 1)] * 44
+ + [None]
+ + [time(0, 5), time(5, 0)]
+ )
+ else:
+ raise NotImplementedError
+ return pd.array(data, dtype=dtype)
+
+
+@pytest.fixture
+def data_missing(data):
+ """Length-2 array with [NA, Valid]"""
+ return type(data)._from_sequence([None, data[0]])
+
+
+@pytest.fixture
+def na_value():
+ """The scalar missing value for this type. Default 'None'"""
+ return pd.NA
+
+
+class TestConstructors(base.BaseConstructorsTests):
+ @pytest.mark.xfail(
+ reason=(
+ "str(dtype) constructs "
+ "e.g. in64[pyarrow] like int64 (numpy) "
+ "due to StorageExtensionDtype.__str__"
+ )
+ )
+ def test_from_dtype(self, data):
+ super().test_from_dtype(data)
+
+
+class TestGetitemTests(base.BaseGetitemTests):
+ @pytest.mark.xfail(
+ reason=(
+ "data.dtype.type return pyarrow.DataType "
+ "but this (intentionally) returns "
+ "Python scalars or pd.Na"
+ )
+ )
+ def test_getitem_scalar(self, data):
+ super().test_getitem_scalar(data)
+
+ def test_take_series(self, request, data):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ unit = getattr(data.dtype.pyarrow_dtype, "unit", None)
+ bad_units = ["ns"]
+ if pa_version_under2p0:
+ bad_units.extend(["s", "ms", "us"])
+ if pa_version_under3p0 and tz not in (None, "UTC") and unit in bad_units:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"Not supported by pyarrow < 3.0 "
+ f"with timestamp type {tz} and {unit}"
+ )
+ )
+ )
+ super().test_take_series(data)
+
+ def test_reindex(self, request, data, na_value):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ unit = getattr(data.dtype.pyarrow_dtype, "unit", None)
+ bad_units = ["ns"]
+ if pa_version_under2p0:
+ bad_units.extend(["s", "ms", "us"])
+ if pa_version_under3p0 and tz not in (None, "UTC") and unit in bad_units:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"Not supported by pyarrow < 3.0 "
+ f"with timestamp type {tz} and {unit}"
+ )
+ )
+ )
+ super().test_reindex(data, na_value)
+
+ def test_loc_iloc_frame_single_dtype(self, request, using_array_manager, data):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ unit = getattr(data.dtype.pyarrow_dtype, "unit", None)
+ bad_units = ["ns"]
+ if pa_version_under2p0:
+ bad_units.extend(["s", "ms", "us"])
+ if (
+ pa_version_under3p0
+ and not using_array_manager
+ and tz not in (None, "UTC")
+ and unit in bad_units
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"Not supported by pyarrow < 3.0 "
+ f"with timestamp type {tz} and {unit}"
+ )
+ )
+ )
+ super().test_loc_iloc_frame_single_dtype(data)
+
+
+def test_arrowdtype_construct_from_string_type_with_parameters():
+ with pytest.raises(NotImplementedError, match="Passing pyarrow type"):
+ ArrowDtype.construct_from_string("timestamp[s][pyarrow]")
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Not fully user facing yet.
Supersedes https://github.com/pandas-dev/pandas/pull/46972
cc @jbrockmendel let me know if this is what you had in mind | https://api.github.com/repos/pandas-dev/pandas/pulls/47034 | 2022-05-16T18:53:40Z | 2022-06-09T12:42:59Z | 2022-06-09T12:42:59Z | 2022-08-25T23:44:11Z |
DOC: Fix a typo in documentation for qyear | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 05e71c22052ad..de44f0da8275e 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -2202,7 +2202,7 @@ cdef class _Period(PeriodMixin):
2018
If the fiscal year starts in April (`Q-MAR`), the first quarter of
- 2018 will start in April 2017. `year` will then be 2018, but `qyear`
+ 2018 will start in April 2017. `year` will then be 2017, but `qyear`
will be the fiscal year, 2018.
>>> per = pd.Period('2018Q1', freq='Q-MAR')
| This fixes a typo that I ran across in the [documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Period.qyear.html?highlight=qyear#pandas.Period.qyear). This change corrects the wording to match the given example that is just below. | https://api.github.com/repos/pandas-dev/pandas/pulls/47033 | 2022-05-16T18:13:19Z | 2022-05-16T18:47:38Z | 2022-05-16T18:47:38Z | 2022-05-16T18:47:48Z |
CI: Start Testing on Python 3.11 | diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 09639acafbba1..d0895450df818 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -29,7 +29,7 @@ env:
jobs:
build:
- if: false # Comment this line out to "unfreeze"
+ #if: false # Comment this line out to "unfreeze"
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
@@ -55,13 +55,14 @@ jobs:
python-version: '3.11-dev'
- name: Install dependencies
- shell: bash -el {0}
+ #shell: bash -el {0}
run: |
- python3 -m pip install --upgrade pip setuptools wheel
- python3 -m pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy
- python3 -m pip install git+https://github.com/nedbat/coveragepy.git
- python3 -m pip install cython python-dateutil pytz hypothesis pytest>=6.2.5 pytest-xdist pytest-cov pytest-asyncio>=0.17
- python3 -m pip list
+ python -m pip install --upgrade pip setuptools wheel
+ # pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy
+ python -m pip install git+https://github.com/nedbat/coveragepy.git
+ python -m pip install git+https://github.com/numpy/numpy.git
+ python -m pip install cython python-dateutil pytz hypothesis pytest>=6.2.5 pytest-xdist pytest-cov pytest-asyncio
+ python -m pip list
- name: Build Pandas
run: |
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index bb4787f07b2f0..5d3a36a902c05 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -28,6 +28,7 @@
PY39 = sys.version_info >= (3, 9)
PY310 = sys.version_info >= (3, 10)
+PY311 = sys.version_info >= (3, 11)
PYPY = platform.python_implementation() == "PyPy"
IS64 = sys.maxsize > 2**32
diff --git a/pandas/conftest.py b/pandas/conftest.py
index dfe8c5f1778d3..4253e15f0bd5c 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -79,6 +79,14 @@
# Import "zoneinfo" could not be resolved (reportMissingImports)
import zoneinfo # type: ignore[no-redef]
+ # Although zoneinfo can be imported in Py39, it is effectively
+ # "not available" without tzdata/IANA tz data.
+ # We will set zoneinfo to not found in this case
+ try:
+ utc_zoneinfo = zoneinfo.ZoneInfo("UTC")
+ except zoneinfo.ZoneInfoNotFoundError:
+ zoneinfo = None
+
# Until https://github.com/numpy/numpy/issues/19078 is sorted out, just suppress
suppress_npdev_promotion_warning = pytest.mark.filterwarnings(
"ignore:Promotion of numbers and bools:FutureWarning"
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index 47f1052808e0c..57db65b42464b 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -12,6 +12,7 @@
import numpy as np
import pytest
+from pandas.compat import PY311
from pandas.errors import (
EmptyDataError,
ParserError,
@@ -230,7 +231,7 @@ def test_null_byte_char(all_parsers):
names = ["a", "b"]
parser = all_parsers
- if parser.engine == "c":
+ if parser.engine == "c" or PY311:
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py
index 456dd049d2f4a..d47d78da0d905 100644
--- a/pandas/tests/io/parser/test_quoting.py
+++ b/pandas/tests/io/parser/test_quoting.py
@@ -80,7 +80,10 @@ def test_null_quote_char(all_parsers, quoting, quote_char):
if quoting != csv.QUOTE_NONE:
# Sanity checking.
- msg = "quotechar must be set if quoting enabled"
+ msg = (
+ r"(quotechar must be set if quoting enabled|"
+ r'"quotechar" must be a 1-character string)'
+ )
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 873103b01f64d..2e7a3ef966a25 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -220,7 +220,7 @@ def test_missing_public_nat_methods(klass, expected):
assert missing == expected
-def _get_overlap_public_nat_methods(klass, as_tuple=False):
+def _get_overlap_public_nat_methods(klass):
"""
Get overlapping public methods between NaT and another class.
@@ -228,8 +228,6 @@ def _get_overlap_public_nat_methods(klass, as_tuple=False):
----------
klass : type
The class to compare with NaT
- as_tuple : bool, default False
- Whether to return a list of tuples of the form (klass, method).
Returns
-------
@@ -249,9 +247,6 @@ def _get_overlap_public_nat_methods(klass, as_tuple=False):
ts_names = dir(Timestamp)
overlap = [x for x in overlap if x not in ts_names]
- if as_tuple:
- overlap = [(klass, method) for method in overlap]
-
overlap.sort()
return overlap
@@ -315,30 +310,28 @@ def test_overlap_public_nat_methods(klass, expected):
@pytest.mark.parametrize(
- "compare",
- (
- _get_overlap_public_nat_methods(Timestamp, True)
- + _get_overlap_public_nat_methods(Timedelta, True)
- ),
+ "klass",
+ (Timestamp, Timedelta),
)
-def test_nat_doc_strings(compare):
+def test_nat_doc_strings(klass):
# see gh-17327
#
# The docstrings for overlapping methods should match.
- klass, method = compare
- klass_doc = getattr(klass, method).__doc__
+ methods = _get_overlap_public_nat_methods(klass)
+ for method in methods:
+ klass_doc = getattr(klass, method).__doc__
- # Ignore differences with Timestamp.isoformat() as they're intentional
- if klass == Timestamp and method == "isoformat":
- return
+ # Ignore differences with Timestamp.isoformat() as they're intentional
+ if klass == Timestamp and method == "isoformat":
+ return
- if method == "to_numpy":
- # GH#44460 can return either dt64 or td64 depending on dtype,
- # different docstring is intentional
- return
+ if method == "to_numpy":
+ # GH#44460 can return either dt64 or td64 depending on dtype,
+ # different docstring is intentional
+ return
- nat_doc = getattr(NaT, method).__doc__
- assert klass_doc == nat_doc
+ nat_doc = getattr(NaT, method).__doc__
+ assert klass_doc == nat_doc
_ops = {
| - [ ] xref #46680 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
It's that time of the year again.
Surprisingly, there are not a lot of failures this year. | https://api.github.com/repos/pandas-dev/pandas/pulls/47032 | 2022-05-15T17:51:01Z | 2022-07-30T18:24:07Z | null | 2022-07-30T18:24:08Z |
Backport PR #47015 on branch 1.4.x (CI: Ensure no-use-pep517 with no-build-isolation with new pip version) | diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 483353cfcb3cd..80448319f7918 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -104,6 +104,6 @@ echo "Build extensions"
python setup.py build_ext -q -j3
echo "Install pandas"
-python -m pip install --no-build-isolation -e .
+python -m pip install --no-build-isolation --no-use-pep517 -e .
echo "done"
| Backport PR #47015: CI: Ensure no-use-pep517 with no-build-isolation with new pip version | https://api.github.com/repos/pandas-dev/pandas/pulls/47031 | 2022-05-15T13:50:48Z | 2022-05-15T15:22:52Z | 2022-05-15T15:22:52Z | 2022-05-15T15:22:52Z |
Backport PR #47020 on branch 1.4.x (CI: Move 32 bit Linux build to GHA) | diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml
new file mode 100644
index 0000000000000..500e800a082d9
--- /dev/null
+++ b/.github/workflows/32-bit-linux.yml
@@ -0,0 +1,43 @@
+name: 32 Bit Linux
+
+on:
+ push:
+ branches:
+ - main
+ - 1.4.x
+ pull_request:
+ branches:
+ - main
+ - 1.4.x
+ paths-ignore:
+ - "doc/**"
+
+jobs:
+ pytest:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Run 32-bit manylinux2014 Docker Build / Tests
+ run: |
+ docker pull quay.io/pypa/manylinux2014_i686
+ docker run --platform linux/386 -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \
+ /bin/bash -xc "cd pandas && \
+ /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
+ . ~/virtualenvs/pandas-dev/bin/activate && \
+ python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
+ pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
+ python setup.py build_ext -q -j2 && \
+ python -m pip install --no-build-isolation --no-use-pep517 -e . && \
+ export PANDAS_CI=1 && \
+ pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml"
+
+ - name: Publish test results for Python 3.8-32 bit full Linux
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test results
+ path: test-data.xml
+ if: failure()
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 8ca4cce155e96..23a48e567dfe9 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -2,7 +2,7 @@
# Unfreeze(by commentingthe if: false() condition) once the
# next Python Dev version has released beta 1 and both Cython and numpy support it
# After that Python has released, migrate the workflows to the
-# posix GHA workflows/Azure pipelines and "freeze" this file by
+# posix GHA workflows and "freeze" this file by
# uncommenting the if: false() condition
# Feel free to modify this comment as necessary.
diff --git a/README.md b/README.md
index 26aed081de4af..4eb983cfb24e8 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,6 @@
[](https://doi.org/10.5281/zenodo.3509134)
[](https://pypi.org/project/pandas/)
[](https://github.com/pandas-dev/pandas/blob/main/LICENSE)
-[](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=main)
[](https://codecov.io/gh/pandas-dev/pandas)
[](https://pepy.tech/project/pandas)
[](https://gitter.im/pydata/pandas)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
deleted file mode 100644
index 0c6195ff6924b..0000000000000
--- a/azure-pipelines.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml
-trigger:
- branches:
- include:
- - main
- - 1.4.x
- paths:
- exclude:
- - 'doc/*'
-
-pr:
- autoCancel: true
- branches:
- include:
- - main
- - 1.4.x
-
-variables:
- PYTEST_WORKERS: auto
- PYTEST_TARGET: pandas
- PATTERN: "not slow and not high_memory and not db and not network and not single_cpu"
- PANDAS_CI: 1
-
-jobs:
-- job: py38_32bit
- pool:
- vmImage: ubuntu-18.04
-
- steps:
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- - script: |
- docker pull quay.io/pypa/manylinux2014_i686
- docker run -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \
- /bin/bash -xc "cd pandas && \
- /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
- . ~/virtualenvs/pandas-dev/bin/activate && \
- python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
- pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
- python setup.py build_ext -q -j2 && \
- python -m pip install --no-build-isolation -e . && \
- export PANDAS_CI=1 && \
- pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml"
- displayName: 'Run 32-bit manylinux2014 Docker Build / Tests'
-
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- failTaskOnFailedTests: true
- testRunTitle: 'Publish test results for Python 3.8-32 bit full Linux'
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index 4826921d4866b..6a8f07663578e 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -443,13 +443,11 @@ library. This makes type checkers aware of the type annotations shipped with pan
Testing with continuous integration
-----------------------------------
-The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__ and
-`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__
+The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__
continuous integration services, once your pull request is submitted.
However, if you wish to run the test suite on a branch prior to submitting the pull request,
then the continuous integration services need to be hooked to your GitHub repository. Instructions are here
-for `GitHub Actions <https://docs.github.com/en/actions/>`__ and
-`Azure Pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/?view=azure-devops>`__.
+for `GitHub Actions <https://docs.github.com/en/actions/>`__.
A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing,
then you will get a red 'X', where you can click through to see the individual failed tests.
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 958df72b3f607..148f8bea16b0c 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -649,7 +649,7 @@ def index_with_missing(request):
"""
# GH 35538. Use deep copy to avoid illusive bug on np-dev
- # Azure pipeline that writes into indices_dict despite copy
+ # GHA pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
vals = ind.values
if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index ff31d93947776..522d25205eeb0 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -71,7 +71,7 @@ def s3_base(worker_id):
if is_platform_arm() or is_platform_mac() or is_platform_windows():
# NOT RUN on Windows/MacOS/ARM, only Ubuntu
# - subprocess in CI can cause timeouts
- # - Azure pipelines/Github Actions do not support
+ # - Github Actions do not support
# container services for the above OSs
# - CircleCI will probably hit the Docker rate pull limit
pytest.skip(
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index 9fe7ae7a5bb90..cf63ab2fe31c7 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -21,7 +21,7 @@
# TODO(GH#44584): Mark these as pytest.mark.single_cpu
pytestmark = pytest.mark.skipif(
is_ci_environment() and (is_platform_windows() or is_platform_mac()),
- reason="On Azure CI, Windows can fail with "
+ reason="On GHA CI, Windows can fail with "
"'Windows fatal exception: stack overflow' "
"and MacOS can timeout",
)
diff --git a/pandas/tests/window/test_online.py b/pandas/tests/window/test_online.py
index ab435a39a497b..b98129e1b07ec 100644
--- a/pandas/tests/window/test_online.py
+++ b/pandas/tests/window/test_online.py
@@ -17,7 +17,7 @@
# TODO(GH#44584): Mark these as pytest.mark.single_cpu
pytestmark = pytest.mark.skipif(
is_ci_environment() and (is_platform_windows() or is_platform_mac()),
- reason="On Azure CI, Windows can fail with "
+ reason="On GHA CI, Windows can fail with "
"'Windows fatal exception: stack overflow' "
"and MacOS can timeout",
)
| Backport PR #47020 | https://api.github.com/repos/pandas-dev/pandas/pulls/47029 | 2022-05-15T11:33:59Z | 2022-05-15T13:53:41Z | 2022-05-15T13:53:41Z | 2022-05-15T13:53:45Z |
Fix: Pandas rolling removes imaginary part of complex | diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 68c05f2bb2c98..a81c9182c04d2 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -14,6 +14,7 @@ import numpy as np
cimport numpy as cnp
from numpy cimport (
+ complex64_t,
float32_t,
float64_t,
int64_t,
@@ -65,6 +66,10 @@ cdef bint is_monotonic_increasing_start_end_bounds(
):
return is_monotonic(start, False)[0] and is_monotonic(end, False)[0]
+ctypedef fused float_complex_types:
+ float64_t
+ complex64_t
+
# ----------------------------------------------------------------------
# Rolling sum
@@ -129,7 +134,7 @@ cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x,
sum_x[0] = t
-def roll_sum(const float64_t[:] values, ndarray[int64_t] start,
+def roll_sum(float_complex_types[:] values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
Py_ssize_t i, j
@@ -151,25 +156,39 @@ def roll_sum(const float64_t[:] values, ndarray[int64_t] start,
e = end[i]
if i == 0 or not is_monotonic_increasing_bounds or s >= end[i - 1]:
-
+ if float_complex_types is complex64_t:
+ prev_value = values[s].real
+ else:
+ prev_value = values[s]
# setup
- prev_value = values[s]
num_consecutive_same_value = 0
sum_x = compensation_add = compensation_remove = 0
nobs = 0
for j in range(s, e):
- add_sum(values[j], &nobs, &sum_x, &compensation_add,
+ if float_complex_types is complex64_t:
+ prev_value = values[j].real
+ else:
+ prev_value = values[j]
+ add_sum(prev_value, &nobs, &sum_x, &compensation_add,
&num_consecutive_same_value, &prev_value)
else:
# calculate deletes
+ if float_complex_types is complex64_t:
+ prev_value = values[j].real
+ else:
+ prev_value = values[j]
for j in range(start[i - 1], s):
- remove_sum(values[j], &nobs, &sum_x, &compensation_remove)
+ remove_sum(prev_value, &nobs, &sum_x, &compensation_remove)
# calculate adds
for j in range(end[i - 1], e):
- add_sum(values[j], &nobs, &sum_x, &compensation_add,
+ if float_complex_types is complex64_t:
+ prev_value = values[j].real
+ else:
+ prev_value = values[j]
+ add_sum(prev_value, &nobs, &sum_x, &compensation_add,
&num_consecutive_same_value, &prev_value)
output[i] = calc_sum(minp, nobs, sum_x, num_consecutive_same_value, prev_value)
@@ -251,7 +270,7 @@ cdef inline void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x,
neg_ct[0] = neg_ct[0] - 1
-def roll_mean(const float64_t[:] values, ndarray[int64_t] start,
+def roll_mean(float_complex_types[:] values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
float64_t val, compensation_add, compensation_remove, sum_x, prev_value
@@ -276,10 +295,18 @@ def roll_mean(const float64_t[:] values, ndarray[int64_t] start,
# setup
compensation_add = compensation_remove = sum_x = 0
nobs = neg_ct = 0
- prev_value = values[s]
+
+ if float_complex_types is complex64_t:
+ prev_value = values[s].real
+ else:
+ prev_value = values[s]
+
num_consecutive_same_value = 0
for j in range(s, e):
- val = values[j]
+ if float_complex_types is complex64_t:
+ val = values[j].real
+ else:
+ val = values[j]
add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add,
&num_consecutive_same_value, &prev_value)
@@ -287,12 +314,18 @@ def roll_mean(const float64_t[:] values, ndarray[int64_t] start,
# calculate deletes
for j in range(start[i - 1], s):
- val = values[j]
+ if float_complex_types is complex64_t:
+ val = values[j].real
+ else:
+ val = values[j]
remove_mean(val, &nobs, &sum_x, &neg_ct, &compensation_remove)
# calculate adds
for j in range(end[i - 1], e):
- val = values[j]
+ if float_complex_types is complex64_t:
+ val = values[j].real
+ else:
+ val = values[j]
add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add,
&num_consecutive_same_value, &prev_value)
@@ -387,14 +420,14 @@ cdef inline void remove_var(float64_t val, float64_t *nobs, float64_t *mean_x,
ssqdm_x[0] = 0
-def roll_var(const float64_t[:] values, ndarray[int64_t] start,
+def roll_var(float_complex_types[:] values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp, int ddof=1) -> np.ndarray:
"""
Numerically stable implementation using Welford's method.
"""
cdef:
float64_t mean_x, ssqdm_x, nobs, compensation_add,
- float64_t compensation_remove, prev_value
+ float64_t compensation_remove, prev_value, val
int64_t s, e, num_consecutive_same_value
Py_ssize_t i, j, N = len(start)
ndarray[float64_t] output
@@ -417,12 +450,20 @@ def roll_var(const float64_t[:] values, ndarray[int64_t] start,
# never removed
if i == 0 or not is_monotonic_increasing_bounds or s >= end[i - 1]:
- prev_value = values[s]
+ if float_complex_types is complex64_t:
+ prev_value = values[s].real
+ else:
+ prev_value = values[s]
+
num_consecutive_same_value = 0
mean_x = ssqdm_x = nobs = compensation_add = compensation_remove = 0
for j in range(s, e):
- add_var(values[j], &nobs, &mean_x, &ssqdm_x, &compensation_add,
+ if float_complex_types is complex64_t:
+ val = values[j].real
+ else:
+ val = values[j]
+ add_var(val, &nobs, &mean_x, &ssqdm_x, &compensation_add,
&num_consecutive_same_value, &prev_value)
else:
@@ -432,12 +473,20 @@ def roll_var(const float64_t[:] values, ndarray[int64_t] start,
# calculate deletes
for j in range(start[i - 1], s):
- remove_var(values[j], &nobs, &mean_x, &ssqdm_x,
+ if float_complex_types is complex64_t:
+ val = values[j].real
+ else:
+ val = values[j]
+ remove_var(val, &nobs, &mean_x, &ssqdm_x,
&compensation_remove)
# calculate adds
for j in range(end[i - 1], e):
- add_var(values[j], &nobs, &mean_x, &ssqdm_x, &compensation_add,
+ if float_complex_types is complex64_t:
+ val = values[j].real
+ else:
+ val = values[j]
+ add_var(val, &nobs, &mean_x, &ssqdm_x, &compensation_add,
&num_consecutive_same_value, &prev_value)
output[i] = calc_var(minp, ddof, nobs, ssqdm_x, num_consecutive_same_value)
@@ -562,7 +611,7 @@ cdef inline void remove_skew(float64_t val, int64_t *nobs,
xxx[0] = t
-def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
+def roll_skew(ndarray[float_complex_types] fused_values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
Py_ssize_t i, j
@@ -572,10 +621,16 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t compensation_x_add, compensation_x_remove
float64_t x, xx, xxx
float64_t prev_value
- int64_t nobs = 0, N = len(start), V = len(values), nobs_mean = 0
+ ndarray[float64_t] values
+ int64_t nobs = 0, N = len(start), V = len(fused_values), nobs_mean = 0
int64_t s, e, num_consecutive_same_value
ndarray[float64_t] output, mean_array, values_copy
- bint is_monotonic_increasing_bounds
+ bint is_monotonic_increasing_bound
+
+ if float_complex_types is complex64_t:
+ values = fused_values.real.astype(float)
+ else:
+ values = fused_values
minp = max(minp, 3)
is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
@@ -775,7 +830,7 @@ cdef inline void remove_kurt(float64_t val, int64_t *nobs,
xxxx[0] = t
-def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
+def roll_kurt(ndarray[float_complex_types] fused_values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
Py_ssize_t i, j
@@ -786,11 +841,17 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t compensation_x_remove, compensation_x_add
float64_t x, xx, xxx, xxxx
float64_t prev_value
+ ndarray[float64_t] values
int64_t nobs, s, e, num_consecutive_same_value
- int64_t N = len(start), V = len(values), nobs_mean = 0
+ int64_t N = len(start), V = len(fused_values), nobs_mean = 0
ndarray[float64_t] output, values_copy
bint is_monotonic_increasing_bounds
+ if float_complex_types is complex64_t:
+ values = fused_values.real.astype(float)
+ else:
+ values = fused_values
+
minp = max(minp, 4)
is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
start, end
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 9e8f95cf340c4..e6b20cb085539 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -40,6 +40,7 @@
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
+ is_complex_dtype,
is_integer,
is_list_like,
is_scalar,
@@ -363,7 +364,10 @@ def _prep_values(self, values: ArrayLike) -> np.ndarray:
if isinstance(values, ExtensionArray):
values = values.to_numpy(np.float64, na_value=np.nan)
else:
- values = ensure_float64(values)
+ if is_complex_dtype(values):
+ values = values.astype(np.complex64)
+ else:
+ values = ensure_float64(values)
except (ValueError, TypeError) as err:
raise TypeError(f"cannot handle this type -> {values.dtype}") from err
@@ -601,7 +605,7 @@ def calc(x):
step=self.step,
)
self._check_window_bounds(start, end, len(x))
-
+ # x = ensure_float64(x)
return func(x, start, end, min_periods, *numba_args)
with np.errstate(all="ignore"):
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 4c26cfb95fd85..49eefa66f802b 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1871,3 +1871,18 @@ def test_rolling_skew_kurt_floating_artifacts():
assert (result[-2:] == 0).all()
result = r.kurt()
assert (result[-2:] == -3).all()
+
+
+def test_rolling_imaginary_part_of_complex(arithmetic_win_operators):
+ # GH 46619
+ func_name = arithmetic_win_operators
+ df = DataFrame([1j, 1 + 2j])
+ result = getattr(
+ df.rolling(2),
+ func_name,
+ )()
+ expected = getattr(
+ DataFrame([0, 1]).rolling(2),
+ func_name,
+ )()
+ tm.assert_frame_equal(result, expected)
| - [ ] closes #46619
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47028 | 2022-05-15T09:59:17Z | 2022-06-06T11:51:03Z | null | 2022-06-06T11:51:03Z |
WIP: ENH Add float[pyarrow] dtype | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 3645e8744d8af..7d266566528d9 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -55,6 +55,9 @@
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
+ Float16ArrowDtype,
+ Float32ArrowDtype,
+ Float64ArrowDtype,
Float32Dtype,
Float64Dtype,
CategoricalDtype,
@@ -317,6 +320,9 @@ def __getattr__(name):
"ExcelFile",
"ExcelWriter",
"Flags",
+ "Float16ArrowDtype",
+ "Float32ArrowDtype",
+ "Float64ArrowDtype",
"Float32Dtype",
"Float64Dtype",
"Grouper",
diff --git a/pandas/core/api.py b/pandas/core/api.py
index cf082d2013d3b..3c49cbd5a16ad 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -27,6 +27,11 @@
value_counts,
)
from pandas.core.arrays import Categorical
+from pandas.core.arrays.arrow.floating import (
+ Float16ArrowDtype,
+ Float32ArrowDtype,
+ Float64ArrowDtype,
+)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import (
Float32Dtype,
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index fdd505e259dd9..47b4cda39962a 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -39,6 +39,7 @@
import pyarrow.compute as pc
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
+ from pandas.core.arrays.arrow.dtype import ArrowDtype
if TYPE_CHECKING:
from pandas import Series
@@ -52,9 +53,11 @@ class ArrowExtensionArray(ExtensionArray):
"""
_data: pa.ChunkedArray
+ _dtype: pa.DataType
- def __init__(self, values: pa.ChunkedArray) -> None:
+ def __init__(self, values: pa.ChunkedArray, pa_dtype: pa.DataType) -> None:
self._data = values
+ self._dtype = ArrowDtype(pa_dtype=pa_dtype, storage="pyarrow")
def __arrow_array__(self, type=None):
"""Convert myself to a pyarrow Array or ChunkedArray."""
@@ -468,3 +471,51 @@ def _replace_with_indices(
return pc.if_else(mask, None, chunk)
return pc.replace_with_mask(chunk, mask, value)
+
+ @property
+ def dtype(self) -> ArrowDtype:
+ return self._dtype
+
+ @classmethod
+ def _from_sequence_of_strings(
+ self, cls, strings, *, dtype=None, copy: bool = False
+ ):
+ if self.dtype._is_numeric:
+ from pandas.core.tools.numeric import to_numeric
+
+ scalars = to_numeric(strings, errors="raise")
+ elif self.dtype._is_temporal:
+ from pandas.core.tools.datetimes import to_datetime
+
+ scalars = to_datetime(strings, error="raise")
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
+
+ def mean(self, skipna: bool = True):
+ if self.dtype._is_numeric:
+ return pa.compute.mean(self._data, skip_nulls=skipna)
+ else:
+ raise TypeError("Cannot compute mean")
+
+ def max(self, skipna: bool = True):
+ if self.dtype._is_numeric:
+ return pa.compute.max(self._data, skip_nulls=skipna)
+ else:
+ raise TypeError("Cannot compute max")
+
+ def min(self, skipna: bool = True):
+ if self.dtype._is_numeric:
+ return pa.compute.min(self._data, skip_nulls=skipna)
+ else:
+ raise TypeError("Cannot compute min")
+
+ def mode(self, skipna: bool = True):
+ if self.dtype._is_numeric:
+ return pa.compute.mode(self._data, skip_nulls=skipna)
+ else:
+ raise TypeError("Cannot compute mode")
+
+ def quantile(self, q: float = 0.5, interpolation: str = "linear"):
+ if self.dtype._is_numeric:
+ return pa.compute.quantile(self._data, q=q, interpolation=interpolation)
+ else:
+ raise TypeError("Cannot compute quantile")
diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index c0ecb0856f27f..4aebb5a6f3e97 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -23,9 +23,38 @@ class ArrowDtype(StorageExtensionDtype):
na_value = pa.NA
- def __init__(self, storage="pyarrow") -> None:
+ def __init__(self, pa_dtype, storage="pyarrow") -> None:
+ self.pa_dtype = pa_dtype
+ self.storage = storage
super().__init__(storage)
+ def _is_numeric(self):
+ return pa.types.is_integer(self.pa_dtype) or pa.types.is_float(self.pa_dtype)
+
+ def _is_integer(self):
+ return pa.types.is_integer(self.pa_dtype)
+
+ def _is_boolean(self):
+ return pa.types.is_boolean(self.pa_dtype)
+
+ def _is_floating(self):
+ return pa.types.is_floating(self.pa_dtype)
+
+ def _is_temporal(self):
+ return pa.types.is_temporal(self.pa_dtype)
+
+ def _is_timestamp(self):
+ return pa.types.is_timestamp(self.pa_dtype)
+
+ def _is_date(self):
+ return pa.types.is_date(self.pa_dtype)
+
+ def _is_time(self):
+ return pa.types.is_time(self.pa_dtype)
+
+ def _is_string(self):
+ return pa.types.is_string(self.pa_dtype)
+
@cache_readonly
def numpy_dtype(self) -> np.dtype:
"""Return an instance of the related numpy dtype"""
diff --git a/pandas/core/arrays/arrow/floating.py b/pandas/core/arrays/arrow/floating.py
new file mode 100644
index 0000000000000..504822fae5dee
--- /dev/null
+++ b/pandas/core/arrays/arrow/floating.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import pyarrow as pa
+
+from pandas.core.dtypes.dtypes import register_extension_dtype
+
+from pandas.core.arrays.arrow.numeric import FloatingArrowDtype
+
+_dtype_docstring = """
+An ExtensionDtype for {dtype} data.
+
+This dtype uses ``pa.null`` as missing value indicator.
+
+Attributes
+----------
+None
+
+Methods
+-------
+None
+"""
+
+
+@register_extension_dtype
+class Float16ArrowDtype(FloatingArrowDtype):
+ name = "float16"
+ type = pa.float16()
+ __doc__ = _dtype_docstring.format(dtype="float16")
+ _dtype_checker = pa.is_float16()
+
+
+@register_extension_dtype
+class Float32ArrowDtype(FloatingArrowDtype):
+ name = "float32"
+ type = pa.float32()
+ __doc__ = _dtype_docstring.format(dtype="float32")
+ _dtype_checker = pa.is_float32()
+
+
+@register_extension_dtype
+class Float64ArrowDtype(FloatingArrowDtype):
+ name = "float64"
+ type = pa.float64()
+ __doc__ = _dtype_docstring.format(dtype="float64")
+ _dtype_checker = pa.is_float64()
+
+
+INT_STR_TO_DTYPE: dict[str, FloatingArrowDtype] = {
+ "float16": Float16ArrowDtype(),
+ "float32": Float32ArrowDtype(),
+ "float64": Float64ArrowDtype(),
+}
diff --git a/pandas/core/arrays/arrow/numeric.py b/pandas/core/arrays/arrow/numeric.py
new file mode 100644
index 0000000000000..c02b6cefec6b3
--- /dev/null
+++ b/pandas/core/arrays/arrow/numeric.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+from typing import (
+ Any,
+ Callable,
+ TypeVar,
+)
+
+import pyarrow as pa
+
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import cache_readonly
+
+from pandas.core.arrays.arrow.array import ArrowExtensionArray
+from pandas.core.arrays.arrow.dtype import ArrowDtype
+
+T = TypeVar("T", bound="FloatingArrowArray")
+
+
+class FloatingArrowDtype(ArrowDtype):
+ _default_pa_dtype: pa.null()
+ _dtype_checker: Callable[[Any], bool] # pa.types.is_<type>
+
+ @property
+ def _is_numeric(self) -> bool:
+ return True
+
+ @property
+ def _is_float(self) -> bool:
+ return True
+
+ @classmethod
+ def _str_to_dtype_mapping(cls):
+ raise AbstractMethodError(cls)
+
+
+class FloatingArrowArray(ArrowExtensionArray):
+ """
+ Base class for Floating dtypes.
+ """
+
+ _dtype_cls: type[FloatingArrowDtype]
+
+ def __init__(self, values: pa.ChunkedArray) -> None:
+ checker = self._dtype_cls._dtype_checker
+ if not (isinstance(values, pa.ChunkedArray) and checker(values.type)):
+ descr = "floating"
+ raise TypeError(f"values should be {descr} arrow array.")
+ super().__init__(values)
+
+ @cache_readonly
+ def dtype(self) -> FloatingArrowDtype:
+ mapping = self._dtype_cls._str_to_dtype_mapping()
+ return mapping[str(self._data.type)]
+
+ @classmethod
+ def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
+ if dtype is None:
+ dtype = cls._dtype_cls._default_pa_dtype
+ return cls(pa.chunked_array([scalars], type=dtype.type))
+
+ @classmethod
+ def _from_sequence_of_strings(cls, strings, *, dtype=None, copy: bool = False):
+ from pandas.core.tools.numeric import to_numeric
+
+ scalars = to_numeric(strings, errors="raise")
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
| - Basing this off https://github.com/pandas-dev/pandas/pull/46972
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47027 | 2022-05-15T02:25:02Z | 2022-05-16T14:03:20Z | null | 2022-05-16T14:03:20Z |
DOC: Clarify decay argument validation in ewm when times is provided | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 9f1c4755bc54f..4681257dcfca0 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -150,6 +150,7 @@ Other enhancements
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
- Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`)
+- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.notable_bug_fixes:
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 32cb4938344c4..922d194f04c55 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -134,8 +134,9 @@ class ExponentialMovingWindow(BaseWindow):
r"""
Provide exponentially weighted (EW) calculations.
- Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be
- provided.
+ Exactly one of ``com``, ``span``, ``halflife``, or ``alpha`` must be
+ provided if ``times`` is not provided. If ``times`` is provided,
+ ``halflife`` and one of ``com``, ``span`` or ``alpha`` may be provided.
Parameters
----------
@@ -155,7 +156,7 @@ class ExponentialMovingWindow(BaseWindow):
:math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for
:math:`halflife > 0`.
- If ``times`` is specified, the time unit (str or timedelta) over which an
+ If ``times`` is specified, a timedelta convertible unit over which an
observation decays to half its value. Only applicable to ``mean()``,
and halflife value will not apply to the other functions.
@@ -389,10 +390,8 @@ def __init__(
raise ValueError("times must be datetime64[ns] dtype.")
if len(self.times) != len(obj):
raise ValueError("times must be the same length as the object.")
- if not isinstance(self.halflife, (str, datetime.timedelta)):
- raise ValueError(
- "halflife must be a string or datetime.timedelta object"
- )
+ if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)):
+ raise ValueError("halflife must be a timedelta convertible object")
if isna(self.times).any():
raise ValueError("Cannot convert NaT values to integer")
self._deltas = _calculate_deltas(self.times, self.halflife)
@@ -404,7 +403,7 @@ def __init__(
self._com = 1.0
else:
if self.halflife is not None and isinstance(
- self.halflife, (str, datetime.timedelta)
+ self.halflife, (str, datetime.timedelta, np.timedelta64)
):
raise ValueError(
"halflife can only be a timedelta convertible argument if "
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index f42a1a5449c5c..8977d1a0d9d1b 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -102,7 +102,7 @@ def engine_and_raw(request):
return request.param
-@pytest.fixture(params=["1 day", timedelta(days=1)])
+@pytest.fixture(params=["1 day", timedelta(days=1), np.timedelta64(1, "D")])
def halflife_with_times(request):
"""Halflife argument for EWM when times is specified."""
return request.param
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index b1e8b43258750..66cd36d121750 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -90,7 +90,7 @@ def test_ewma_times_not_same_length():
def test_ewma_halflife_not_correct_type():
- msg = "halflife must be a string or datetime.timedelta object"
+ msg = "halflife must be a timedelta convertible object"
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))
| - [x] closes #47003 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47026 | 2022-05-15T01:48:39Z | 2022-05-18T13:21:25Z | 2022-05-18T13:21:25Z | 2022-05-18T16:21:20Z |
DEPR: groupby numeric_only default | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 128fd68674f96..af30add139222 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -493,7 +493,8 @@ retained by specifying ``group_keys=False``.
``numeric_only`` default value
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Across the DataFrame operations such as ``min``, ``sum``, and ``idxmax``, the default
+Across the DataFrame and DataFrameGroupBy operations such as
+``min``, ``sum``, and ``idxmax``, the default
value of the ``numeric_only`` argument, if it exists at all, was inconsistent.
Furthermore, operations with the default value ``None`` can lead to surprising
results. (:issue:`46560`)
@@ -523,6 +524,8 @@ gained the ``numeric_only`` argument.
- :meth:`DataFrame.cov`
- :meth:`DataFrame.idxmin`
- :meth:`DataFrame.idxmax`
+- :meth:`.DataFrameGroupBy.cummin`
+- :meth:`.DataFrameGroupBy.cummax`
- :meth:`.DataFrameGroupBy.idxmin`
- :meth:`.DataFrameGroupBy.idxmax`
- :meth:`.GroupBy.var`
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f725ae061cedb..2acf5c826eb57 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -28,6 +28,7 @@
from pandas._libs import (
Interval,
+ lib,
reduction as libreduction,
)
from pandas._typing import (
@@ -1128,10 +1129,15 @@ def _wrap_applied_output_series(
return self._reindex_output(result)
def _cython_transform(
- self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
+ self,
+ how: str,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
+ axis: int = 0,
+ **kwargs,
) -> DataFrame:
assert axis == 0 # handled by caller
# TODO: no tests with self.ndim == 1 for DataFrameGroupBy
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis)
# With self.axis == 0, we have multi-block tests
# e.g. test_rank_min_int, test_cython_transform_frame
@@ -1139,7 +1145,8 @@ def _cython_transform(
# With self.axis == 1, _get_data_to_aggregate does a transpose
# so we always have a single block.
mgr: Manager2D = self._get_data_to_aggregate()
- if numeric_only:
+ orig_mgr_len = len(mgr)
+ if numeric_only_bool:
mgr = mgr.get_numeric_data(copy=False)
def arr_func(bvalues: ArrayLike) -> ArrayLike:
@@ -1152,8 +1159,8 @@ def arr_func(bvalues: ArrayLike) -> ArrayLike:
res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True)
res_mgr.set_axis(1, mgr.axes[1])
- if len(res_mgr) < len(mgr):
- warn_dropping_nuisance_columns_deprecated(type(self), how)
+ if len(res_mgr) < orig_mgr_len:
+ warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only)
res_df = self.obj._constructor(res_mgr)
if self.axis == 1:
@@ -1269,7 +1276,9 @@ def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
output[i] = sgb.transform(wrapper)
except TypeError:
# e.g. trying to call nanmean with string values
- warn_dropping_nuisance_columns_deprecated(type(self), "transform")
+ warn_dropping_nuisance_columns_deprecated(
+ type(self), "transform", numeric_only=False
+ )
else:
inds.append(i)
@@ -1559,19 +1568,27 @@ def nunique(self, dropna: bool = True) -> DataFrame:
_shared_docs["idxmax"],
numeric_only_default="True for axis=0, False for axis=1",
)
- def idxmax(self, axis=0, skipna: bool = True, numeric_only: bool | None = None):
+ def idxmax(
+ self,
+ axis=0,
+ skipna: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
+ ):
axis = DataFrame._get_axis_number(axis)
- if numeric_only is None:
- numeric_only = None if axis == 0 else False
+ if numeric_only is lib.no_default:
+ # Cannot use self._resolve_numeric_only; we must pass None to
+ # DataFrame.idxmax for backwards compatibility
+ numeric_only_arg = None if axis == 0 else False
+ else:
+ numeric_only_arg = cast(bool, numeric_only)
def func(df):
- # NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmax,
"argmax",
axis=axis,
skipna=skipna,
- numeric_only=numeric_only,
+ numeric_only=numeric_only_arg,
)
indices = res._values
index = df._get_axis(axis)
@@ -1579,25 +1596,35 @@ def func(df):
return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmax"
- return self._python_apply_general(func, self._obj_with_exclusions)
+ result = self._python_apply_general(func, self._obj_with_exclusions)
+ self._maybe_warn_numeric_only_depr("idxmax", result, numeric_only)
+ return result
@doc(
_shared_docs["idxmin"],
numeric_only_default="True for axis=0, False for axis=1",
)
- def idxmin(self, axis=0, skipna: bool = True, numeric_only: bool | None = None):
+ def idxmin(
+ self,
+ axis=0,
+ skipna: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
+ ):
axis = DataFrame._get_axis_number(axis)
- if numeric_only is None:
- numeric_only = None if axis == 0 else False
+ if numeric_only is lib.no_default:
+ # Cannot use self._resolve_numeric_only; we must pass None to
+ # DataFrame.idxmin for backwards compatibility
+ numeric_only_arg = None if axis == 0 else False
+ else:
+ numeric_only_arg = cast(bool, numeric_only)
def func(df):
- # NB: here we use numeric_only=None, in DataFrame it is False GH#46560
res = df._reduce(
nanops.nanargmin,
"argmin",
axis=axis,
skipna=skipna,
- numeric_only=numeric_only,
+ numeric_only=numeric_only_arg,
)
indices = res._values
index = df._get_axis(axis)
@@ -1605,7 +1632,9 @@ def func(df):
return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmin"
- return self._python_apply_general(func, self._obj_with_exclusions)
+ result = self._python_apply_general(func, self._obj_with_exclusions)
+ self._maybe_warn_numeric_only_depr("idxmin", result, numeric_only)
+ return result
boxplot = boxplot_frame_groupby
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 70f8e0a752dcb..0203d54e0de86 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -939,8 +939,15 @@ def wrapper(*args, **kwargs):
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
+ numeric_only = kwargs.get("numeric_only", lib.no_default)
+
def curried(x):
- return f(x, *args, **kwargs)
+ with warnings.catch_warnings():
+ # Catch any warnings from dispatch to DataFrame; we'll emit
+ # a warning for groupby below
+ match = "The default value of numeric_only "
+ warnings.filterwarnings("ignore", match, FutureWarning)
+ return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
@@ -956,6 +963,13 @@ def curried(x):
curried, self._obj_with_exclusions, is_transform=is_transform
)
+ if self._selected_obj.ndim != 1 and self.axis != 1:
+ missing = self._obj_with_exclusions.columns.difference(result.columns)
+ if len(missing) > 0:
+ warn_dropping_nuisance_columns_deprecated(
+ type(self), name, numeric_only
+ )
+
if self.grouper.has_dropped_na and is_transform:
# result will have dropped rows due to nans, fill with null
# and ensure index is ordered same as the input
@@ -1223,7 +1237,9 @@ def _wrap_applied_output(
):
raise AbstractMethodError(self)
- def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool:
+ def _resolve_numeric_only(
+ self, numeric_only: bool | lib.NoDefault, axis: int
+ ) -> bool:
"""
Determine subclass-specific default value for 'numeric_only'.
@@ -1233,6 +1249,8 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool:
Parameters
----------
numeric_only : bool or lib.no_default
+ axis : int
+ Axis passed to the groupby op (not self.axis).
Returns
-------
@@ -1243,7 +1261,7 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool:
# i.e. not explicitly passed by user
if self.obj.ndim == 2:
# i.e. DataFrameGroupBy
- numeric_only = True
+ numeric_only = axis != 1
# GH#42395 GH#43108 GH#43154
# Regression from 1.2.5 to 1.3 caused object columns to be dropped
if self.axis:
@@ -1253,7 +1271,6 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool:
check = obj._get_numeric_data()
if len(obj.columns) and not len(check.columns) and not obj.empty:
numeric_only = False
- # TODO: v1.4+ Add FutureWarning
else:
numeric_only = False
@@ -1262,6 +1279,27 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool:
# expected "bool")
return numeric_only # type: ignore[return-value]
+ def _maybe_warn_numeric_only_depr(
+ self, how: str, result: DataFrame | Series, numeric_only: bool | lib.NoDefault
+ ) -> None:
+ """Emit warning on numeric_only behavior deprecation when appropriate.
+
+ Parameters
+ ----------
+ how : str
+ Groupby kernel name.
+ result :
+ Result of the groupby operation.
+ numeric_only : bool or lib.no_default
+ Argument as passed by user.
+ """
+ if (
+ self._obj_with_exclusions.ndim != 1
+ and result.ndim > 1
+ and len(result.columns) < len(self._obj_with_exclusions.columns)
+ ):
+ warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only)
+
# -----------------------------------------------------------------
# numba
@@ -1522,7 +1560,9 @@ def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs):
except TypeError:
if raise_on_typeerror:
raise
- warn_dropping_nuisance_columns_deprecated(type(self), "agg")
+ warn_dropping_nuisance_columns_deprecated(
+ type(self), "agg", numeric_only=False
+ )
continue
key = base.OutputKey(label=name, position=idx)
@@ -1536,7 +1576,7 @@ def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs):
@final
def _agg_general(
self,
- numeric_only: bool = True,
+ numeric_only: bool | lib.NoDefault = True,
min_count: int = -1,
*,
alias: str,
@@ -1598,17 +1638,19 @@ def _cython_agg_general(
self,
how: str,
alt: Callable,
- numeric_only: bool,
+ numeric_only: bool | lib.NoDefault,
min_count: int = -1,
ignore_failures: bool = True,
):
# Note: we never get here with how="ohlc" for DataFrameGroupBy;
# that goes through SeriesGroupBy
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
data = self._get_data_to_aggregate()
is_ser = data.ndim == 1
- if numeric_only:
+ orig_len = len(data)
+ if numeric_only_bool:
if is_ser and not is_numeric_dtype(self._selected_obj.dtype):
# GH#41291 match Series behavior
kwd_name = "numeric_only"
@@ -1638,8 +1680,8 @@ def array_func(values: ArrayLike) -> ArrayLike:
# continue and exclude the block
new_mgr = data.grouped_reduce(array_func, ignore_failures=ignore_failures)
- if not is_ser and len(new_mgr) < len(data):
- warn_dropping_nuisance_columns_deprecated(type(self), how)
+ if not is_ser and len(new_mgr) < orig_len:
+ warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only)
res = self._wrap_agged_manager(new_mgr)
if is_ser:
@@ -1997,7 +2039,7 @@ def mean(
2 4.0
Name: B, dtype: float64
"""
- numeric_only_bool = self._resolve_numeric_only(numeric_only)
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
if maybe_use_numba(engine):
from pandas.core._numba.kernels import sliding_mean
@@ -2007,7 +2049,7 @@ def mean(
result = self._cython_agg_general(
"mean",
alt=lambda x: Series(x).mean(numeric_only=numeric_only_bool),
- numeric_only=numeric_only_bool,
+ numeric_only=numeric_only,
)
return result.__finalize__(self.obj, method="groupby")
@@ -2031,12 +2073,12 @@ def median(self, numeric_only: bool | lib.NoDefault = lib.no_default):
Series or DataFrame
Median of values within each group.
"""
- numeric_only_bool = self._resolve_numeric_only(numeric_only)
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
result = self._cython_agg_general(
"median",
alt=lambda x: Series(x).median(numeric_only=numeric_only_bool),
- numeric_only=numeric_only_bool,
+ numeric_only=numeric_only,
)
return result.__finalize__(self.obj, method="groupby")
@@ -2092,7 +2134,7 @@ def std(
return np.sqrt(self._numba_agg_general(sliding_var, engine_kwargs, ddof))
else:
- return self._get_cythonized_result(
+ result = self._get_cythonized_result(
libgroupby.group_var,
cython_dtype=np.dtype(np.float64),
numeric_only=numeric_only,
@@ -2100,6 +2142,8 @@ def std(
post_processing=lambda vals, inference: np.sqrt(vals),
ddof=ddof,
)
+ self._maybe_warn_numeric_only_depr("std", result, numeric_only)
+ return result
@final
@Substitution(name="groupby")
@@ -2153,12 +2197,12 @@ def var(
return self._numba_agg_general(sliding_var, engine_kwargs, ddof)
else:
- numeric_only_bool = self._resolve_numeric_only(numeric_only)
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
if ddof == 1:
return self._cython_agg_general(
"var",
alt=lambda x: Series(x).var(ddof=ddof),
- numeric_only=numeric_only_bool,
+ numeric_only=numeric_only,
ignore_failures=numeric_only is lib.no_default,
)
else:
@@ -2193,6 +2237,8 @@ def sem(self, ddof: int = 1, numeric_only: bool | lib.NoDefault = lib.no_default
Standard error of the mean of values within each group.
"""
result = self.std(ddof=ddof, numeric_only=numeric_only)
+ self._maybe_warn_numeric_only_depr("sem", result, numeric_only)
+
if result.ndim == 1:
result /= np.sqrt(self.count())
else:
@@ -2253,8 +2299,6 @@ def sum(
engine_kwargs,
)
else:
- numeric_only = self._resolve_numeric_only(numeric_only)
-
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _agg_general() returns. GH #31422
@@ -2273,8 +2317,6 @@ def sum(
def prod(
self, numeric_only: bool | lib.NoDefault = lib.no_default, min_count: int = 0
):
- numeric_only = self._resolve_numeric_only(numeric_only)
-
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod
)
@@ -3050,7 +3092,7 @@ def quantile(
a 2.0
b 3.0
"""
- numeric_only_bool = self._resolve_numeric_only(numeric_only)
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, np.dtype | None]:
if is_object_dtype(vals):
@@ -3153,7 +3195,9 @@ def blk_func(values: ArrayLike) -> ArrayLike:
and not is_ser
and len(res_mgr.items) != len(mgr.items)
):
- warn_dropping_nuisance_columns_deprecated(type(self), "quantile")
+ warn_dropping_nuisance_columns_deprecated(
+ type(self), "quantile", numeric_only
+ )
if len(res_mgr.items) == 0:
# re-call grouped_reduce to get the desired exception message
@@ -3447,7 +3491,7 @@ def cumsum(self, axis=0, *args, **kwargs):
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def cummin(self, axis=0, **kwargs):
+ def cummin(self, axis=0, numeric_only=False, **kwargs):
"""
Cumulative min for each group.
@@ -3460,12 +3504,14 @@ def cummin(self, axis=0, **kwargs):
f = lambda x: np.minimum.accumulate(x, axis)
return self._python_apply_general(f, self._selected_obj, is_transform=True)
- return self._cython_transform("cummin", numeric_only=False, skipna=skipna)
+ return self._cython_transform(
+ "cummin", numeric_only=numeric_only, skipna=skipna
+ )
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def cummax(self, axis=0, **kwargs):
+ def cummax(self, axis=0, numeric_only=False, **kwargs):
"""
Cumulative max for each group.
@@ -3478,7 +3524,9 @@ def cummax(self, axis=0, **kwargs):
f = lambda x: np.maximum.accumulate(x, axis)
return self._python_apply_general(f, self._selected_obj, is_transform=True)
- return self._cython_transform("cummax", numeric_only=False, skipna=skipna)
+ return self._cython_transform(
+ "cummax", numeric_only=numeric_only, skipna=skipna
+ )
@final
def _get_cythonized_result(
@@ -3532,7 +3580,7 @@ def _get_cythonized_result(
-------
`Series` or `DataFrame` with filled values
"""
- numeric_only = self._resolve_numeric_only(numeric_only)
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
if post_processing and not callable(post_processing):
raise ValueError("'post_processing' must be a callable!")
@@ -3601,15 +3649,16 @@ def blk_func(values: ArrayLike) -> ArrayLike:
# Operate block-wise instead of column-by-column
is_ser = obj.ndim == 1
mgr = self._get_data_to_aggregate()
+ orig_mgr_len = len(mgr)
- if numeric_only:
+ if numeric_only_bool:
mgr = mgr.get_numeric_data()
res_mgr = mgr.grouped_reduce(blk_func, ignore_failures=True)
- if not is_ser and len(res_mgr.items) != len(mgr.items):
+ if not is_ser and len(res_mgr.items) != orig_mgr_len:
howstr = how.replace("group_", "")
- warn_dropping_nuisance_columns_deprecated(type(self), howstr)
+ warn_dropping_nuisance_columns_deprecated(type(self), howstr, numeric_only)
if len(res_mgr.items) == 0:
# We re-call grouped_reduce to get the right exception message
@@ -4155,13 +4204,27 @@ def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiInde
return mi
-def warn_dropping_nuisance_columns_deprecated(cls, how: str) -> None:
- warnings.warn(
- "Dropping invalid columns in "
- f"{cls.__name__}.{how} is deprecated. "
- "In a future version, a TypeError will be raised. "
- f"Before calling .{how}, select only columns which "
- "should be valid for the function.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
+def warn_dropping_nuisance_columns_deprecated(cls, how: str, numeric_only) -> None:
+ if how == "add":
+ how = "sum"
+ if numeric_only is not lib.no_default and not numeric_only:
+ # numeric_only was specified and falsey but still dropped nuisance columns
+ warnings.warn(
+ "Dropping invalid columns in "
+ f"{cls.__name__}.{how} is deprecated. "
+ "In a future version, a TypeError will be raised. "
+ f"Before calling .{how}, select only columns which "
+ "should be valid for the function.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ elif numeric_only is lib.no_default:
+ warnings.warn(
+ "The default value of numeric_only in "
+ f"{cls.__name__}.{how} is deprecated. "
+ "In a future version, numeric_only will default to False. "
+ f"Either specify numeric_only or select only columns which "
+ "should be valid for the function.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 336865d32167d..711f1835446a5 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -1,5 +1,7 @@
import pytest
+from pandas.core.dtypes.common import is_numeric_dtype
+
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base.base import BaseExtensionTests
@@ -96,7 +98,15 @@ def test_in_numeric_groupby(self, data_for_grouping):
"C": [1, 1, 1, 1, 1, 1, 1, 1],
}
)
- result = df.groupby("A").sum().columns
+
+ dtype = data_for_grouping.dtype
+ if is_numeric_dtype(dtype) or dtype.name == "decimal":
+ warn = None
+ else:
+ warn = FutureWarning
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = df.groupby("A").sum().columns
if data_for_grouping.dtype._is_numeric:
expected = pd.Index(["B", "C"])
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index ba89a76a7f8c2..fedcc0e2a2284 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -1785,7 +1785,9 @@ def test_stack_multiple_bug(self):
multi = df.set_index(["DATE", "ID"])
multi.columns.name = "Params"
unst = multi.unstack("ID")
- down = unst.resample("W-THU").mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ down = unst.resample("W-THU").mean()
rs = down.stack("ID")
xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID")
diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py
index 2b248afb42057..b4a3a60e72139 100644
--- a/pandas/tests/generic/test_frame.py
+++ b/pandas/tests/generic/test_frame.py
@@ -71,7 +71,9 @@ def test_metadata_propagation_indiv_groupby(self):
"D": np.random.randn(8),
}
)
- result = df.groupby("A").sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").sum()
tm.assert_metadata_equivalent(df, result)
def test_metadata_propagation_indiv_resample(self):
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index bdb33bff5eadd..37b02571158b9 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -238,7 +238,10 @@ def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype
[[1, 2, 3, 4, 5, 6]] * 3,
columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]),
).astype({("a", "j"): dtype, ("b", "j"): dtype})
- result = df.groupby(level=1, axis=1).agg(func)
+ warn = FutureWarning if func == "std" else None
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = df.groupby(level=1, axis=1).agg(func)
expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype(
result_dtype_dict
)
@@ -262,7 +265,10 @@ def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict):
columns=Index([10, 20, 10, 20], name="x"),
dtype="int64",
).astype({10: "Int64"})
- result = df.groupby("x", axis=1).agg(func)
+ warn = FutureWarning if func == "std" else None
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = df.groupby("x", axis=1).agg(func)
expected = DataFrame(
data=expected_data,
index=Index([0, 1, 0], name="y"),
diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py
index 7c64d82608c9e..e541abb368a02 100644
--- a/pandas/tests/groupby/test_allowlist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -187,7 +187,9 @@ def test_regression_allowlist_methods(raw_frame, op, level, axis, skipna, sort):
if op in AGG_FUNCTIONS_WITH_SKIPNA:
grouped = frame.groupby(level=level, axis=axis, sort=sort)
- with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
+ with tm.assert_produces_warning(
+ warn, match="The 'mad' method is deprecated", raise_on_extra_warnings=False
+ ):
result = getattr(grouped, op)(skipna=skipna)
with tm.assert_produces_warning(FutureWarning):
expected = getattr(frame, op)(level=level, axis=axis, skipna=skipna)
@@ -196,8 +198,8 @@ def test_regression_allowlist_methods(raw_frame, op, level, axis, skipna, sort):
tm.assert_frame_equal(result, expected)
else:
grouped = frame.groupby(level=level, axis=axis, sort=sort)
- result = getattr(grouped, op)()
with tm.assert_produces_warning(FutureWarning):
+ result = getattr(grouped, op)()
expected = getattr(frame, op)(level=level, axis=axis)
if sort:
expected = expected.sort_index(axis=axis, level=level)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index abe1b8f13e32e..004e55f4d161f 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -103,7 +103,9 @@ def test_basic(): # TODO: split this test
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
- result = gb.sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
@@ -314,6 +316,7 @@ def test_apply(ordered):
tm.assert_series_equal(result, expected)
+@pytest.mark.filterwarnings("ignore:.*value of numeric_only.*:FutureWarning")
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
@@ -807,8 +810,12 @@ def test_preserve_categorical_dtype():
}
)
for col in ["C1", "C2"]:
- result1 = df.groupby(by=col, as_index=False, observed=False).mean()
- result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result1 = df.groupby(by=col, as_index=False, observed=False).mean()
+ result2 = (
+ df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
+ )
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index c99405dfccb66..206d37e1a800e 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -4,6 +4,7 @@
import numpy as np
import pytest
+from pandas._libs import lib
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
@@ -259,7 +260,9 @@ def _check(self, df, method, expected_columns, expected_columns_numeric):
# these have numeric_only kwarg, but default to False
warn = FutureWarning
- with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
+ with tm.assert_produces_warning(
+ warn, match="Dropping invalid columns", raise_on_extra_warnings=False
+ ):
result = getattr(gb, method)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
@@ -297,24 +300,26 @@ def gni(self, df):
return gni
# TODO: non-unique columns, as_index=False
- @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
def test_idxmax(self, gb):
# object dtype so idxmax goes through _aggregate_item_by_item
# GH#5610
# non-cython calls should not include the grouper
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
- result = gb.idxmax()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = gb.idxmax()
tm.assert_frame_equal(result, expected)
- @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
def test_idxmin(self, gb):
# object dtype so idxmax goes through _aggregate_item_by_item
# GH#5610
# non-cython calls should not include the grouper
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
- result = gb.idxmin()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = gb.idxmin()
tm.assert_frame_equal(result, expected)
def test_mad(self, gb, gni):
@@ -1238,3 +1243,114 @@ def test_groupby_sum_timedelta_with_nat():
res = gb["b"].sum(min_count=2)
expected = Series([td3, pd.NaT], dtype="m8[ns]", name="b", index=expected.index)
tm.assert_series_equal(res, expected)
+
+
+@pytest.mark.parametrize(
+ "kernel, numeric_only_default, drops_nuisance, has_arg",
+ [
+ ("all", False, False, False),
+ ("any", False, False, False),
+ ("bfill", False, False, False),
+ ("corr", True, False, True),
+ ("corrwith", True, False, True),
+ ("cov", True, False, True),
+ ("cummax", False, True, True),
+ ("cummin", False, True, True),
+ ("cumprod", True, True, True),
+ ("cumsum", True, True, True),
+ ("diff", False, False, False),
+ ("ffill", False, False, False),
+ ("fillna", False, False, False),
+ ("first", False, False, True),
+ ("idxmax", True, False, True),
+ ("idxmin", True, False, True),
+ ("last", False, False, True),
+ ("max", False, True, True),
+ ("mean", True, True, True),
+ ("median", True, True, True),
+ ("min", False, True, True),
+ ("nth", False, False, False),
+ ("nunique", False, False, False),
+ ("pct_change", False, False, False),
+ ("prod", True, True, True),
+ ("quantile", True, False, True),
+ ("sem", True, True, True),
+ ("skew", True, False, True),
+ ("std", True, True, True),
+ ("sum", True, True, True),
+ ("var", True, False, True),
+ ],
+)
+@pytest.mark.parametrize("numeric_only", [True, False, lib.no_default])
+@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
+def test_deprecate_numeric_only(
+ kernel, numeric_only_default, drops_nuisance, has_arg, numeric_only, keys
+):
+ # GH#46072
+ # drops_nuisance: Whether the op drops nuisance columns even when numeric_only=False
+ # has_arg: Whether the op has a numeric_only arg
+ df = DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]})
+
+ if kernel == "corrwith":
+ args = (df,)
+ elif kernel == "nth" or kernel == "fillna":
+ args = (0,)
+ else:
+ args = ()
+ kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only}
+
+ gb = df.groupby(keys)
+ method = getattr(gb, kernel)
+ if has_arg and (
+ # Cases where b does not appear in the result
+ numeric_only is True
+ or (numeric_only is lib.no_default and numeric_only_default)
+ or drops_nuisance
+ ):
+ if numeric_only is True or (not numeric_only_default and not drops_nuisance):
+ warn = None
+ else:
+ warn = FutureWarning
+ if numeric_only is lib.no_default and numeric_only_default:
+ msg = f"The default value of numeric_only in DataFrameGroupBy.{kernel}"
+ else:
+ msg = f"Dropping invalid columns in DataFrameGroupBy.{kernel}"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = method(*args, **kwargs)
+
+ assert "b" not in result.columns
+ elif (
+ # kernels that work on any dtype and have numeric_only arg
+ kernel in ("first", "last", "corrwith")
+ or (
+ # kernels that work on any dtype and don't have numeric_only arg
+ kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique")
+ and numeric_only is lib.no_default
+ )
+ ):
+ result = method(*args, **kwargs)
+ assert "b" in result.columns
+ elif has_arg:
+ assert numeric_only is not True
+ assert numeric_only is not lib.no_default or numeric_only_default is False
+ assert not drops_nuisance
+ # kernels that are successful on any dtype were above; this will fail
+ msg = (
+ "(not allowed for this dtype"
+ "|must be a string or a number"
+ "|cannot be performed against 'object' dtypes"
+ "|must be a string or a real number)"
+ )
+ with pytest.raises(TypeError, match=msg):
+ method(*args, **kwargs)
+ elif not has_arg and numeric_only is not lib.no_default:
+ with pytest.raises(
+ TypeError, match="got an unexpected keyword argument 'numeric_only'"
+ ):
+ method(*args, **kwargs)
+ else:
+ assert kernel in ("diff", "pct_change")
+ assert numeric_only is lib.no_default
+ # Doesn't have numeric_only argument and fails on nuisance columns
+ with pytest.raises(TypeError, match=r"unsupported operand type"):
+ method(*args, **kwargs)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 016e817e43402..61951292d55a8 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -474,13 +474,17 @@ def test_frame_groupby_columns(tsframe):
def test_frame_set_name_single(df):
grouped = df.groupby("A")
- result = grouped.mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped.mean()
assert result.index.name == "A"
- result = df.groupby("A", as_index=False).mean()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
- result = grouped.agg(np.mean)
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
@@ -503,8 +507,10 @@ def test_multi_func(df):
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
- agged = grouped.mean()
- expected = df.groupby(["A", "B"]).mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ agged = grouped.mean()
+ expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
@@ -661,13 +667,16 @@ def test_groupby_as_index_agg(df):
# single-key
- result = grouped.agg(np.mean)
- expected = grouped.mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped.agg(np.mean)
+ expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
- expected2 = grouped.mean()
- expected2["D"] = grouped.sum()["D"]
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ expected2 = grouped.mean()
+ expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
@@ -754,8 +763,10 @@ def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
- result = grouped["C"].agg(np.sum)
- expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped["C"].agg(np.sum)
+ expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
@@ -765,7 +776,8 @@ def test_as_index_series_return_frame(df):
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
- expected = grouped.sum().loc[:, ["A", "C"]]
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
@@ -789,8 +801,10 @@ def test_groupby_as_index_cython(df):
# single-key
grouped = data.groupby("A", as_index=False)
- result = grouped.mean()
- expected = data.groupby(["A"]).mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped.mean()
+ expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
@@ -859,15 +873,18 @@ def test_groupby_multi_corner(df):
def test_omit_nuisance(df):
grouped = df.groupby("A")
- agged = grouped.agg(np.mean)
- exp = grouped.mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ agged = grouped.agg(np.mean)
+ exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
- result = grouped.agg(np.sum)
- expected = grouped.sum()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped.agg(np.sum)
+ expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
@@ -898,7 +915,7 @@ def test_keep_nuisance_agg(df, agg_function):
@pytest.mark.parametrize("numeric_only", [lib.no_default, True, False])
def test_omit_nuisance_agg(df, agg_function, numeric_only):
# GH 38774, GH 38815
- if not numeric_only and agg_function != "sum":
+ if numeric_only is lib.no_default or (not numeric_only and agg_function != "sum"):
# sum doesn't drop strings
warn = FutureWarning
else:
@@ -913,7 +930,13 @@ def test_omit_nuisance_agg(df, agg_function, numeric_only):
with pytest.raises(klass, match="could not convert string to float"):
getattr(grouped, agg_function)(numeric_only=numeric_only)
else:
- with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
+ if numeric_only is lib.no_default:
+ msg = (
+ f"The default value of numeric_only in DataFrameGroupBy.{agg_function}"
+ )
+ else:
+ msg = "Dropping invalid columns"
+ with tm.assert_produces_warning(warn, match=msg):
result = getattr(grouped, agg_function)(numeric_only=numeric_only)
if (
(numeric_only is lib.no_default or not numeric_only)
@@ -923,9 +946,18 @@ def test_omit_nuisance_agg(df, agg_function, numeric_only):
columns = ["A", "B", "C", "D"]
else:
columns = ["A", "C", "D"]
- expected = getattr(df.loc[:, columns].groupby("A"), agg_function)(
- numeric_only=numeric_only
- )
+ if agg_function == "sum" and numeric_only is False:
+ # sum doesn't drop nuisance string columns
+ warn = None
+ elif agg_function in ("sum", "std", "var", "sem") and numeric_only is not True:
+ warn = FutureWarning
+ else:
+ warn = None
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(warn, match=msg):
+ expected = getattr(df.loc[:, columns].groupby("A"), agg_function)(
+ numeric_only=numeric_only
+ )
tm.assert_frame_equal(result, expected)
@@ -941,8 +973,10 @@ def test_omit_nuisance_warnings(df):
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
- agged = grouped.agg(np.mean)
- exp = grouped.mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ agged = grouped.agg(np.mean)
+ exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
@@ -959,8 +993,10 @@ def test_empty_groups_corner(mframe):
)
grouped = df.groupby(["k1", "k2"])
- result = grouped.agg(np.mean)
- expected = grouped.mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped.agg(np.mean)
+ expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
@@ -982,7 +1018,9 @@ def test_wrap_aggregated_output_multindex(mframe):
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
- agged = df.groupby(keys).agg(np.mean)
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
@@ -1143,15 +1181,19 @@ def test_groupby_with_hier_columns():
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
- result = df.groupby(level=0).mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
- result = grouped.sum()
- expected = df.groupby("A").sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = grouped.sum()
+ expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
@@ -1179,8 +1221,10 @@ def test_groupby_wrong_multi_labels():
def test_groupby_series_with_name(df):
- result = df.groupby(df["A"]).mean()
- result2 = df.groupby(df["A"], as_index=False).mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby(df["A"]).mean()
+ result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
@@ -1331,8 +1375,10 @@ def test_groupby_unit64_float_conversion():
def test_groupby_list_infer_array_like(df):
- result = df.groupby(list(df["A"])).mean()
- expected = df.groupby(df["A"]).mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby(list(df["A"])).mean()
+ expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
@@ -1445,7 +1491,9 @@ def test_groupby_2d_malformed():
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
- tmp = d.groupby(["group"]).mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ tmp = d.groupby(["group"]).mean()
res_values = np.array([[0.0, 1.0], [0.0, 1.0]])
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
@@ -1611,10 +1659,13 @@ def f(group):
def test_no_dummy_key_names(df):
# see gh-1291
- result = df.groupby(df["A"].values).sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby(df["A"].values).sum()
assert result.index.name is None
- result = df.groupby([df["A"].values, df["B"].values]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby([df["A"].values, df["B"].values]).sum()
assert result.index.names == (None, None)
@@ -2634,7 +2685,9 @@ def test_groupby_aggregation_numeric_with_non_numeric_dtype():
)
gb = df.groupby(by=["x"])
- result = gb.sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = gb.sum()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py
index 54cde30ceac92..b665843728165 100644
--- a/pandas/tests/groupby/test_groupby_subclass.py
+++ b/pandas/tests/groupby/test_groupby_subclass.py
@@ -112,5 +112,7 @@ def test_groupby_resample_preserves_subclass(obj):
df = df.set_index("Date")
# Confirm groupby.resample() preserves dataframe type
- result = df.groupby("Buyer").resample("5D").sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("Buyer").resample("5D").sum()
assert isinstance(result, obj)
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index c6e4bec3f7b2c..85602fdf7274a 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -59,8 +59,10 @@ def test_column_select_via_attr(self, df):
tm.assert_series_equal(result, expected)
df["mean"] = 1.5
- result = df.groupby("A").mean()
- expected = df.groupby("A").agg(np.mean)
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").mean()
+ expected = df.groupby("A").agg(np.mean)
tm.assert_frame_equal(result, expected)
def test_getitem_list_of_columns(self):
@@ -284,25 +286,30 @@ def test_grouper_column_and_index(self):
{"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]},
index=idx,
)
- result = df_multi.groupby(["B", pd.Grouper(level="inner")]).mean()
- expected = df_multi.reset_index().groupby(["B", "inner"]).mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df_multi.groupby(["B", pd.Grouper(level="inner")]).mean()
+ expected = df_multi.reset_index().groupby(["B", "inner"]).mean()
tm.assert_frame_equal(result, expected)
# Test the reverse grouping order
- result = df_multi.groupby([pd.Grouper(level="inner"), "B"]).mean()
- expected = df_multi.reset_index().groupby(["inner", "B"]).mean()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df_multi.groupby([pd.Grouper(level="inner"), "B"]).mean()
+ expected = df_multi.reset_index().groupby(["inner", "B"]).mean()
tm.assert_frame_equal(result, expected)
# Grouping a single-index frame by a column and the index should
# be equivalent to resetting the index and grouping by two columns
df_single = df_multi.reset_index("outer")
- result = df_single.groupby(["B", pd.Grouper(level="inner")]).mean()
- expected = df_single.reset_index().groupby(["B", "inner"]).mean()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df_single.groupby(["B", pd.Grouper(level="inner")]).mean()
+ expected = df_single.reset_index().groupby(["B", "inner"]).mean()
tm.assert_frame_equal(result, expected)
# Test the reverse grouping order
- result = df_single.groupby([pd.Grouper(level="inner"), "B"]).mean()
- expected = df_single.reset_index().groupby(["inner", "B"]).mean()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df_single.groupby([pd.Grouper(level="inner"), "B"]).mean()
+ expected = df_single.reset_index().groupby(["inner", "B"]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_levels_and_columns(self):
@@ -376,8 +383,10 @@ def test_empty_groups(self, df):
def test_groupby_grouper(self, df):
grouped = df.groupby("A")
- result = df.groupby(grouped.grouper).mean()
- expected = grouped.mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby(grouped.grouper).mean()
+ expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_dict_mapping(self):
diff --git a/pandas/tests/groupby/test_index_as_string.py b/pandas/tests/groupby/test_index_as_string.py
index 971a447b84cae..501a21981a148 100644
--- a/pandas/tests/groupby/test_index_as_string.py
+++ b/pandas/tests/groupby/test_index_as_string.py
@@ -47,8 +47,11 @@ def series():
],
)
def test_grouper_index_level_as_string(frame, key_strs, groupers):
- result = frame.groupby(key_strs).mean()
- expected = frame.groupby(groupers).mean()
+ warn = FutureWarning if "B" not in key_strs or "outer" in frame.columns else None
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = frame.groupby(key_strs).mean()
+ expected = frame.groupby(groupers).mean()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_pipe.py b/pandas/tests/groupby/test_pipe.py
index 1229251f88c7d..4f58bcb5ee763 100644
--- a/pandas/tests/groupby/test_pipe.py
+++ b/pandas/tests/groupby/test_pipe.py
@@ -60,7 +60,9 @@ def f(dfgb, arg1):
)
def g(dfgb, arg2):
- return dfgb.sum() / dfgb.sum().sum() + arg2
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ return dfgb.sum() / dfgb.sum().sum() + arg2
def h(df, arg3):
return df.x + df.y - arg3
diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py
index 0f7e71c99584d..20328426a69b2 100644
--- a/pandas/tests/groupby/test_quantile.py
+++ b/pandas/tests/groupby/test_quantile.py
@@ -246,9 +246,10 @@ def test_groupby_quantile_nullable_array(values, q):
def test_groupby_quantile_skips_invalid_dtype(q, numeric_only):
df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]})
- if numeric_only is None or numeric_only:
+ if numeric_only is lib.no_default or numeric_only:
warn = FutureWarning if numeric_only is lib.no_default else None
- with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
+ msg = "The default value of numeric_only in DataFrameGroupBy.quantile"
+ with tm.assert_produces_warning(warn, match=msg):
result = df.groupby("a").quantile(q, numeric_only=numeric_only)
expected = df.groupby("a")[["b"]].quantile(q)
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index 7c9d6e7a73087..ae725cbb2b588 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -105,14 +105,18 @@ def test_groupby_with_timegrouper(self):
)
expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype="int64")
- result1 = df.resample("5D").sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result1 = df.resample("5D").sum()
tm.assert_frame_equal(result1, expected)
df_sorted = df.sort_index()
- result2 = df_sorted.groupby(Grouper(freq="5D")).sum()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result2 = df_sorted.groupby(Grouper(freq="5D")).sum()
tm.assert_frame_equal(result2, expected)
- result3 = df.groupby(Grouper(freq="5D")).sum()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result3 = df.groupby(Grouper(freq="5D")).sum()
tm.assert_frame_equal(result3, expected)
@pytest.mark.parametrize("should_sort", [True, False])
@@ -186,7 +190,9 @@ def test_timegrouper_with_reg_groups(self):
}
).set_index(["Date", "Buyer"])
- result = df.groupby([Grouper(freq="A"), "Buyer"]).sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby([Grouper(freq="A"), "Buyer"]).sum()
tm.assert_frame_equal(result, expected)
expected = DataFrame(
@@ -201,7 +207,8 @@ def test_timegrouper_with_reg_groups(self):
],
}
).set_index(["Date", "Buyer"])
- result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum()
tm.assert_frame_equal(result, expected)
df_original = DataFrame(
@@ -239,10 +246,13 @@ def test_timegrouper_with_reg_groups(self):
}
).set_index(["Date", "Buyer"])
- result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum()
+ warn_msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum()
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M"), "Buyer"]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1M"), "Buyer"]).sum()
expected = DataFrame(
{
"Buyer": "Carl Joe Mark".split(),
@@ -258,7 +268,8 @@ def test_timegrouper_with_reg_groups(self):
# passing the name
df = df.reset_index()
- result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum()
tm.assert_frame_equal(result, expected)
with pytest.raises(KeyError, match="'The grouper name foo is not found'"):
@@ -266,9 +277,11 @@ def test_timegrouper_with_reg_groups(self):
# passing the level
df = df.set_index("Date")
- result = df.groupby([Grouper(freq="1M", level="Date"), "Buyer"]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1M", level="Date"), "Buyer"]).sum()
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M", level=0), "Buyer"]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1M", level=0), "Buyer"]).sum()
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match="The level foo is not valid"):
@@ -277,7 +290,8 @@ def test_timegrouper_with_reg_groups(self):
# multi names
df = df.copy()
df["Date"] = df.index + offsets.MonthEnd(2)
- result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum()
expected = DataFrame(
{
"Buyer": "Carl Joe Mark".split(),
@@ -306,18 +320,22 @@ def test_timegrouper_with_reg_groups(self):
[datetime(2013, 10, 31, 0, 0)], freq=offsets.MonthEnd(), name="Date"
),
)
- result = df.groupby(Grouper(freq="1M")).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby(Grouper(freq="1M")).sum()
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M")]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1M")]).sum()
tm.assert_frame_equal(result, expected)
expected.index = expected.index.shift(1)
assert expected.index.freq == offsets.MonthEnd()
- result = df.groupby(Grouper(freq="1M", key="Date")).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby(Grouper(freq="1M", key="Date")).sum()
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M", key="Date")]).sum()
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ result = df.groupby([Grouper(freq="1M", key="Date")]).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("freq", ["D", "M", "A", "Q-APR"])
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 0492b143eaf1f..b325edaf2b1ea 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -203,15 +203,24 @@ def test_transform_axis_1_reducer(request, reduction_func):
):
marker = pytest.mark.xfail(reason="transform incorrectly fails - GH#45986")
request.node.add_marker(marker)
- warn = FutureWarning if reduction_func == "mad" else None
+ if reduction_func == "mad":
+ warn = FutureWarning
+ msg = "The 'mad' method is deprecated"
+ elif reduction_func in ("sem", "std"):
+ warn = FutureWarning
+ msg = "The default value of numeric_only"
+ else:
+ warn = None
+ msg = ""
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])
- with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
+ with tm.assert_produces_warning(warn, match=msg):
result = df.groupby([0, 0, 1], axis=1).transform(reduction_func)
if reduction_func == "size":
# size doesn't behave in the same manner; hardcode expected result
expected = DataFrame(2 * [[2, 2, 1]], index=df.index, columns=df.columns)
else:
+ warn = FutureWarning if reduction_func == "mad" else None
with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
expected = df.T.groupby([0, 0, 1]).transform(reduction_func).T
tm.assert_equal(result, expected)
@@ -462,8 +471,10 @@ def test_transform_exclude_nuisance(df):
def test_transform_function_aliases(df):
- result = df.groupby("A").transform("mean")
- expected = df.groupby("A").transform(np.mean)
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").transform("mean")
+ expected = df.groupby("A").transform(np.mean)
tm.assert_frame_equal(result, expected)
result = df.groupby("A")["C"].transform("mean")
@@ -774,8 +785,15 @@ def test_cython_transform_frame(op, args, targop):
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
- tm.assert_frame_equal(expected, gb.transform(op, *args).sort_index(axis=1))
- tm.assert_frame_equal(expected, getattr(gb, op)(*args).sort_index(axis=1))
+
+ warn = None if op == "shift" else FutureWarning
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = gb.transform(op, *args).sort_index(axis=1)
+ tm.assert_frame_equal(result, expected)
+ with tm.assert_produces_warning(warn, match=msg):
+ result = getattr(gb, op)(*args).sort_index(axis=1)
+ tm.assert_frame_equal(result, expected)
# individual columns
for c in df:
if (
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index b5bae4759090a..21ef078bcf418 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -90,9 +90,10 @@ def test_groupby_resample_on_api():
}
)
- expected = df.set_index("dates").groupby("key").resample("D").mean()
-
- result = df.groupby("key").resample("D", on="dates").mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ expected = df.set_index("dates").groupby("key").resample("D").mean()
+ result = df.groupby("key").resample("D", on="dates").mean()
tm.assert_frame_equal(result, expected)
@@ -196,7 +197,9 @@ def tests_skip_nuisance(test_frame):
tm.assert_frame_equal(result, expected)
expected = r[["A", "B", "C"]].sum()
- result = r.sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = r.sum()
tm.assert_frame_equal(result, expected)
@@ -643,10 +646,15 @@ def test_selection_api_validation():
exp = df_exp.resample("2D").sum()
exp.index.name = "date"
- tm.assert_frame_equal(exp, df.resample("2D", on="date").sum())
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.resample("2D", on="date").sum()
+ tm.assert_frame_equal(exp, result)
exp.index.name = "d"
- tm.assert_frame_equal(exp, df.resample("2D", level="d").sum())
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.resample("2D", level="d").sum()
+ tm.assert_frame_equal(exp, result)
@pytest.mark.parametrize(
@@ -809,9 +817,13 @@ def test_frame_downsample_method(method, numeric_only, expected_data):
func = getattr(resampled, method)
if method == "prod" and numeric_only is not True:
warn = FutureWarning
+ msg = "Dropping invalid columns in DataFrameGroupBy.prod is deprecated"
+ elif method == "sum" and numeric_only is lib.no_default:
+ warn = FutureWarning
+ msg = "The default value of numeric_only in DataFrameGroupBy.sum is deprecated"
else:
warn = None
- msg = "Dropping invalid columns in DataFrameGroupBy.prod is deprecated"
+ msg = ""
with tm.assert_produces_warning(warn, match=msg):
result = func(numeric_only=numeric_only)
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index cae2d77dfbd3f..5392ec88544a1 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -408,7 +408,9 @@ def test_resample_groupby_agg():
df["date"] = pd.to_datetime(df["date"])
resampled = df.groupby("cat").resample("Y", on="date")
- expected = resampled.sum()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ expected = resampled.sum()
result = resampled.agg({"num": "sum"})
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 5d6df078ee8c3..905c2af2d22a5 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -553,7 +553,9 @@ def test_mixed_type_join_with_suffix(self):
df.insert(5, "dt", "foo")
grouped = df.groupby("id")
- mn = grouped.mean()
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ mn = grouped.mean()
cn = grouped.count()
# it works!
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 31f720b9ec336..0d3b9f4561b55 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -146,8 +146,10 @@ def test_pivot_table_nocols(self):
df = DataFrame(
{"rows": ["a", "b", "c"], "cols": ["x", "y", "z"], "values": [1, 2, 3]}
)
- rs = df.pivot_table(columns="cols", aggfunc=np.sum)
- xp = df.pivot_table(index="cols", aggfunc=np.sum).T
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ rs = df.pivot_table(columns="cols", aggfunc=np.sum)
+ xp = df.pivot_table(index="cols", aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns="cols", aggfunc={"values": "mean"})
@@ -903,12 +905,19 @@ def test_no_col(self):
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
- table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc=np.mean)
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ table = self.data.pivot_table(
+ index=["AA", "BB"], margins=True, aggfunc=np.mean
+ )
for value_col in table.columns:
totals = table.loc[("All", ""), value_col]
assert totals == self.data[value_col].mean()
- table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean")
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ table = self.data.pivot_table(
+ index=["AA", "BB"], margins=True, aggfunc="mean"
+ )
for item in ["DD", "EE", "FF"]:
totals = table.loc[("All", ""), item]
assert totals == self.data[item].mean()
@@ -964,7 +973,9 @@ def test_margin_with_only_columns_defined(
}
)
- result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc)
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc)
expected = DataFrame(values, index=Index(["D", "E"]), columns=expected_columns)
tm.assert_frame_equal(result, expected)
@@ -1990,8 +2001,11 @@ def test_pivot_string_as_func(self):
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
- result = pivot_table(self.data, index="A", columns="B", aggfunc=f)
- expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy)
+
+ msg = "The default value of numeric_only"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = pivot_table(self.data, index="A", columns="B", aggfunc=f)
+ expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
| - [x] closes #46072 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
There are two cases we want to emit a deprecation warning for DataFrameGroupBy:
- numeric_only is not specified and columns get dropped. In this case emit a warning that the default of numeric_only will change to False in the future.
- numeric_only is specified to False and columns still get dropped. In this case emit a warning that the op will raise in the future. | https://api.github.com/repos/pandas-dev/pandas/pulls/47025 | 2022-05-14T16:04:46Z | 2022-05-18T12:54:44Z | 2022-05-18T12:54:43Z | 2022-05-19T02:28:46Z |
DOC: Fix some typos in pandas/. | diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h
index 5b5995a671b2c..71df0c5a186b7 100644
--- a/pandas/_libs/src/ujson/lib/ultrajson.h
+++ b/pandas/_libs/src/ujson/lib/ultrajson.h
@@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/pandas/_libs/src/ujson/lib/ultrajsondec.c b/pandas/_libs/src/ujson/lib/ultrajsondec.c
index fee552672b8b6..c7779b8b428ae 100644
--- a/pandas/_libs/src/ujson/lib/ultrajsondec.c
+++ b/pandas/_libs/src/ujson/lib/ultrajsondec.c
@@ -32,7 +32,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights
reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
index 4469631b7b3f7..5d90710441a94 100644
--- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c
+++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
@@ -32,7 +32,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights
reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/pandas/_libs/src/ujson/python/JSONtoObj.c b/pandas/_libs/src/ujson/python/JSONtoObj.c
index 14683f4c28cbe..c58f25b8f99ea 100644
--- a/pandas/_libs/src/ujson/python/JSONtoObj.c
+++ b/pandas/_libs/src/ujson/python/JSONtoObj.c
@@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 7de47749e500c..73d2a1f786f8b 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -30,7 +30,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights
reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c
index a8fdb4f55bfca..def06cdf2db84 100644
--- a/pandas/_libs/src/ujson/python/ujson.c
+++ b/pandas/_libs/src/ujson/python/ujson.c
@@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/pandas/_libs/src/ujson/python/version.h b/pandas/_libs/src/ujson/python/version.h
index 3f38642b6df87..15c55309d6270 100644
--- a/pandas/_libs/src/ujson/python/version.h
+++ b/pandas/_libs/src/ujson/python/version.h
@@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py
index d2e6b6e935ed5..84160344437b5 100644
--- a/pandas/core/array_algos/putmask.py
+++ b/pandas/core/array_algos/putmask.py
@@ -1,5 +1,5 @@
"""
-EA-compatible analogue to to np.putmask
+EA-compatible analogue to np.putmask
"""
from __future__ import annotations
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index e015a3e5a941a..752ce28c58f55 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -720,7 +720,7 @@ def _values(self) -> np.ndarray:
if isinstance(vals, ABCDatetimeIndex):
# TODO: this can be removed after Timestamp.freq is removed
# The astype(object) below does not remove the freq from
- # the underlying Timestamps so we remove it here to to match
+ # the underlying Timestamps so we remove it here to match
# the behavior of self._get_level_values
vals = vals.copy()
vals.freq = None
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index ded525cd099fc..2e638f5b0fb3d 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1892,7 +1892,7 @@ def create_block_manager_from_blocks(
# If verify_integrity=False, then caller is responsible for checking
# all(x.shape[-1] == len(axes[1]) for x in blocks)
# sum(x.shape[0] for x in blocks) == len(axes[0])
- # set(x for for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0])))
+ # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0])))
# all(blk.ndim == 2 for blk in blocks)
# This allows us to safely pass verify_integrity=False
diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py
index 9d0a2fa81b53b..3cc4fa4713831 100644
--- a/pandas/tests/indexes/multi/conftest.py
+++ b/pandas/tests/indexes/multi/conftest.py
@@ -8,7 +8,7 @@
)
-# Note: identical the the "multi" entry in the top-level "index" fixture
+# Note: identical the "multi" entry in the top-level "index" fixture
@pytest.fixture
def idx():
# a MultiIndex used to test the general functionality of the
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 426192ab46914..19ea6753c616c 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -1180,7 +1180,7 @@ def test_iloc_getitem_int_single_ea_block_view(self):
arr = interval_range(1, 10.0)._values
df = DataFrame(arr)
- # ser should be a *view* on the the DataFrame data
+ # ser should be a *view* on the DataFrame data
ser = df.iloc[2]
# if we have a view, then changing arr[2] should also change ser[0]
diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py
index a350b6fe7546d..8d9f075d8674d 100644
--- a/pandas/tests/io/formats/style/test_matplotlib.py
+++ b/pandas/tests/io/formats/style/test_matplotlib.py
@@ -216,7 +216,7 @@ def test_background_gradient_gmap_array_raises(gmap, axis):
],
)
def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap):
- # test gmap given as DataFrame that it aligns to the the data including subset
+ # test gmap given as DataFrame that it aligns to the data including subset
expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap, subset=subset)
result = styler_blank.background_gradient(axis=None, gmap=gmap, subset=subset)
assert expected._compute().ctx == result._compute().ctx
@@ -232,7 +232,7 @@ def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, ex
],
)
def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap):
- # test gmap given as Series that it aligns to the the data including subset
+ # test gmap given as Series that it aligns to the data including subset
expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap)._compute()
result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute()
assert expected.ctx == result.ctx
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47022 | 2022-05-14T12:40:39Z | 2022-05-18T13:22:08Z | 2022-05-18T13:22:08Z | 2022-05-18T13:22:12Z |
MAINT: Fix some typos. | diff --git a/LICENSES/ULTRAJSON_LICENSE b/LICENSES/ULTRAJSON_LICENSE
index 3b2886eb9cfae..a905fb017d813 100644
--- a/LICENSES/ULTRAJSON_LICENSE
+++ b/LICENSES/ULTRAJSON_LICENSE
@@ -28,7 +28,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
-Numeric decoder derived from from TCL library
+Numeric decoder derived from TCL library
http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py
index ff6bb582e1af5..d871f907232f5 100644
--- a/asv_bench/benchmarks/sparse.py
+++ b/asv_bench/benchmarks/sparse.py
@@ -146,10 +146,10 @@ def setup(self, fill_value):
def make_block_array(self, length, num_blocks, block_size, fill_value):
arr = np.full(length, fill_value)
- indicies = np.random.choice(
+ indices = np.random.choice(
np.arange(0, length, block_size), num_blocks, replace=False
)
- for ind in indicies:
+ for ind in indices:
arr[ind : ind + block_size] = np.random.randint(0, 100, block_size)
return SparseArray(arr, fill_value=fill_value)
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 4ed71913d7b4d..4f6972f3eaf2a 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3529,7 +3529,7 @@ See the :ref:`cookbook<cookbook.excel>` for some advanced strategies.
**Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.**
This is no longer supported, switch to using ``openpyxl`` instead.
- Attempting to use the the ``xlwt`` engine will raise a ``FutureWarning``
+ Attempting to use the ``xlwt`` engine will raise a ``FutureWarning``
unless the option :attr:`io.excel.xls.writer` is set to ``"xlwt"``.
While this option is now deprecated and will also raise a ``FutureWarning``,
it can be globally set and the warning suppressed. Users are recommended to
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 85c552a7d596f..0b2341bef413e 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -612,7 +612,7 @@
"source": [
"### Acting on the Index and Column Headers\n",
"\n",
- "Similar application is acheived for headers by using:\n",
+ "Similar application is achieved for headers by using:\n",
" \n",
"- [.applymap_index()][applymapindex] (elementwise): accepts a function that takes a single value and returns a string with the CSS attribute-value pair.\n",
"- [.apply_index()][applyindex] (level-wise): accepts a function that takes a Series and returns a Series, or numpy array with an identical shape where each element is a string with a CSS attribute-value pair. This method passes each level of your Index one-at-a-time. To style the index use `axis=0` and to style the column headers use `axis=1`.\n",
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47021 | 2022-05-14T12:38:56Z | 2022-05-15T02:58:33Z | 2022-05-15T02:58:33Z | 2022-05-15T02:58:39Z |
CI: Move 32 bit Linux build to GHA | diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml
new file mode 100644
index 0000000000000..500e800a082d9
--- /dev/null
+++ b/.github/workflows/32-bit-linux.yml
@@ -0,0 +1,43 @@
+name: 32 Bit Linux
+
+on:
+ push:
+ branches:
+ - main
+ - 1.4.x
+ pull_request:
+ branches:
+ - main
+ - 1.4.x
+ paths-ignore:
+ - "doc/**"
+
+jobs:
+ pytest:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Run 32-bit manylinux2014 Docker Build / Tests
+ run: |
+ docker pull quay.io/pypa/manylinux2014_i686
+ docker run --platform linux/386 -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \
+ /bin/bash -xc "cd pandas && \
+ /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
+ . ~/virtualenvs/pandas-dev/bin/activate && \
+ python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
+ pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
+ python setup.py build_ext -q -j2 && \
+ python -m pip install --no-build-isolation --no-use-pep517 -e . && \
+ export PANDAS_CI=1 && \
+ pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml"
+
+ - name: Publish test results for Python 3.8-32 bit full Linux
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test results
+ path: test-data.xml
+ if: failure()
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 8ca4cce155e96..23a48e567dfe9 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -2,7 +2,7 @@
# Unfreeze(by commentingthe if: false() condition) once the
# next Python Dev version has released beta 1 and both Cython and numpy support it
# After that Python has released, migrate the workflows to the
-# posix GHA workflows/Azure pipelines and "freeze" this file by
+# posix GHA workflows and "freeze" this file by
# uncommenting the if: false() condition
# Feel free to modify this comment as necessary.
diff --git a/README.md b/README.md
index 2216f59965354..fc3f988dc6809 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,6 @@
[](https://doi.org/10.5281/zenodo.3509134)
[](https://pypi.org/project/pandas/)
[](https://github.com/pandas-dev/pandas/blob/main/LICENSE)
-[](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=main)
[](https://codecov.io/gh/pandas-dev/pandas)
[](https://pepy.tech/project/pandas)
[](https://gitter.im/pydata/pandas)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
deleted file mode 100644
index 37df662df8edc..0000000000000
--- a/azure-pipelines.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml
-trigger:
- branches:
- include:
- - main
- - 1.4.x
- paths:
- exclude:
- - 'doc/**'
-
-pr:
- autoCancel: true
- branches:
- include:
- - main
- - 1.4.x
-
-variables:
- PYTEST_WORKERS: auto
- PYTEST_TARGET: pandas
- PATTERN: "not slow and not db and not network and not single_cpu"
- PANDAS_CI: 1
-
-jobs:
-- job: py38_32bit
- pool:
- vmImage: ubuntu-18.04
-
- steps:
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- - script: |
- docker pull quay.io/pypa/manylinux2014_i686
- docker run -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \
- /bin/bash -xc "cd pandas && \
- /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
- . ~/virtualenvs/pandas-dev/bin/activate && \
- python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
- pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
- python setup.py build_ext -q -j2 && \
- python -m pip install --no-build-isolation -e . && \
- export PANDAS_CI=1 && \
- pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml"
- displayName: 'Run 32-bit manylinux2014 Docker Build / Tests'
-
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- failTaskOnFailedTests: true
- testRunTitle: 'Publish test results for Python 3.8-32 bit full Linux'
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index b0ba275e3d895..fcaa8adcdcae9 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -289,13 +289,11 @@ library. This makes type checkers aware of the type annotations shipped with pan
Testing with continuous integration
-----------------------------------
-The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__ and
-`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__
+The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__
continuous integration services, once your pull request is submitted.
However, if you wish to run the test suite on a branch prior to submitting the pull request,
then the continuous integration services need to be hooked to your GitHub repository. Instructions are here
-for `GitHub Actions <https://docs.github.com/en/actions/>`__ and
-`Azure Pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/?view=azure-devops>`__.
+for `GitHub Actions <https://docs.github.com/en/actions/>`__.
A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing,
then you will get a red 'X', where you can click through to see the individual failed tests.
diff --git a/pandas/conftest.py b/pandas/conftest.py
index b84d6fc9c2b99..dc03f081388b8 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -658,7 +658,7 @@ def index_with_missing(request):
"""
# GH 35538. Use deep copy to avoid illusive bug on np-dev
- # Azure pipeline that writes into indices_dict despite copy
+ # GHA pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
vals = ind.values
if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index ff31d93947776..522d25205eeb0 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -71,7 +71,7 @@ def s3_base(worker_id):
if is_platform_arm() or is_platform_mac() or is_platform_windows():
# NOT RUN on Windows/MacOS/ARM, only Ubuntu
# - subprocess in CI can cause timeouts
- # - Azure pipelines/Github Actions do not support
+ # - Github Actions do not support
# container services for the above OSs
# - CircleCI will probably hit the Docker rate pull limit
pytest.skip(
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index 4cf6306dc39e5..a029c88fa3a7d 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -20,7 +20,7 @@
# TODO(GH#44584): Mark these as pytest.mark.single_cpu
pytestmark = pytest.mark.skipif(
is_ci_environment() and (is_platform_windows() or is_platform_mac()),
- reason="On Azure CI, Windows can fail with "
+ reason="On GHA CI, Windows can fail with "
"'Windows fatal exception: stack overflow' "
"and MacOS can timeout",
)
diff --git a/pandas/tests/window/test_online.py b/pandas/tests/window/test_online.py
index ab435a39a497b..b98129e1b07ec 100644
--- a/pandas/tests/window/test_online.py
+++ b/pandas/tests/window/test_online.py
@@ -17,7 +17,7 @@
# TODO(GH#44584): Mark these as pytest.mark.single_cpu
pytestmark = pytest.mark.skipif(
is_ci_environment() and (is_platform_windows() or is_platform_mac()),
- reason="On Azure CI, Windows can fail with "
+ reason="On GHA CI, Windows can fail with "
"'Windows fatal exception: stack overflow' "
"and MacOS can timeout",
)
| - [x] closes #46351 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
Also removes references to Azure since no builds run there anymore. | https://api.github.com/repos/pandas-dev/pandas/pulls/47020 | 2022-05-13T21:47:31Z | 2022-05-15T02:56:13Z | 2022-05-15T02:56:13Z | 2022-05-15T13:55:00Z |
REF: simplify tzconversion | diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index b3a006141fadc..72bc6886b5175 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -58,9 +58,7 @@
)
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._libs.tslibs.timezones import tz_compare
-from pandas._libs.tslibs.tzconversion import (
- py_tz_convert_from_utc_single as tz_convert_from_utc_single,
-)
+from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single
from pandas._libs.tslibs.vectorized import (
dt64arr_to_periodarr,
get_resolution,
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 6c96df9a7ea0b..d06dc3160995d 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -54,10 +54,7 @@ from pandas._libs.tslibs.ccalendar cimport (
get_firstbday,
get_lastbday,
)
-from pandas._libs.tslibs.conversion cimport (
- convert_datetime_to_tsobject,
- localize_pydatetime,
-)
+from pandas._libs.tslibs.conversion cimport localize_pydatetime
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
c_NaT as NaT,
@@ -68,7 +65,6 @@ from pandas._libs.tslibs.np_datetime cimport (
npy_datetimestruct,
pydate_to_dtstruct,
)
-from pandas._libs.tslibs.tzconversion cimport tz_convert_from_utc_single
from .dtypes cimport PeriodDtypeCode
from .timedeltas cimport (
@@ -270,10 +266,8 @@ cdef _to_dt64D(dt):
if getattr(dt, 'tzinfo', None) is not None:
# Get the nanosecond timestamp,
# equiv `Timestamp(dt).value` or `dt.timestamp() * 10**9`
- nanos = getattr(dt, "nanosecond", 0)
- i8 = convert_datetime_to_tsobject(dt, tz=None, nanos=nanos).value
- dt = tz_convert_from_utc_single(i8, dt.tzinfo)
- dt = np.int64(dt).astype('datetime64[ns]')
+ naive = dt.astimezone(None)
+ dt = np.datetime64(naive, "D")
else:
dt = np.datetime64(dt)
if dt.dtype.name != "datetime64[D]":
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index fcc9390a2cccd..abdb4aebb625f 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1979,22 +1979,19 @@ default 'raise'
value = tz_localize_to_utc_single(self.value, tz,
ambiguous=ambiguous,
nonexistent=nonexistent)
- out = Timestamp(value, tz=tz)
- if out is not NaT:
- out._set_freq(self._freq) # avoid warning in constructor
- return out
+ elif tz is None:
+ # reset tz
+ value = tz_convert_from_utc_single(self.value, self.tz)
+
else:
- if tz is None:
- # reset tz
- value = tz_convert_from_utc_single(self.value, self.tz)
- out = Timestamp(value, tz=tz)
- if out is not NaT:
- out._set_freq(self._freq) # avoid warning in constructor
- return out
- else:
- raise TypeError(
- "Cannot localize tz-aware Timestamp, use tz_convert for conversions"
- )
+ raise TypeError(
+ "Cannot localize tz-aware Timestamp, use tz_convert for conversions"
+ )
+
+ out = Timestamp(value, tz=tz)
+ if out is not NaT:
+ out._set_freq(self._freq) # avoid warning in constructor
+ return out
def tz_convert(self, tz):
"""
diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd
index 600ac54639dfc..2acad9ea34062 100644
--- a/pandas/_libs/tslibs/tzconversion.pxd
+++ b/pandas/_libs/tslibs/tzconversion.pxd
@@ -6,8 +6,8 @@ from numpy cimport (
)
-cdef int64_t tz_convert_from_utc_single(
- int64_t utc_val, tzinfo tz, bint* fold=?, Py_ssize_t* outpos=?
+cpdef int64_t tz_convert_from_utc_single(
+ int64_t utc_val, tzinfo tz
) except? -1
cdef int64_t tz_localize_to_utc_single(
int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=*
diff --git a/pandas/_libs/tslibs/tzconversion.pyi b/pandas/_libs/tslibs/tzconversion.pyi
index 8647dee712294..2531383b658fc 100644
--- a/pandas/_libs/tslibs/tzconversion.pyi
+++ b/pandas/_libs/tslibs/tzconversion.pyi
@@ -8,8 +8,8 @@ import numpy as np
from pandas._typing import npt
-# py_tz_convert_from_utc_single exposed for testing
-def py_tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ...
+# tz_convert_from_utc_single exposed for testing
+def tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ...
def tz_localize_to_utc(
vals: npt.NDArray[np.int64],
tz: tzinfo | None,
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index fede9768f5fee..0cdc7b777f45f 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -183,29 +183,28 @@ timedelta-like}
localized : ndarray[int64_t]
"""
cdef:
- const int64_t[::1] deltas
ndarray[uint8_t, cast=True] ambiguous_array
- Py_ssize_t i, idx, pos, ntrans, n = vals.shape[0]
+ Py_ssize_t i, idx, pos, n = vals.shape[0]
Py_ssize_t delta_idx_offset, delta_idx, pos_left, pos_right
- int64_t *tdata
int64_t v, left, right, val, new_local, remaining_mins
int64_t first_delta, delta
int64_t shift_delta = 0
- ndarray[int64_t] trans, result_a, result_b, dst_hours
+ ndarray[int64_t] result_a, result_b, dst_hours
int64_t[::1] result
npy_datetimestruct dts
bint infer_dst = False, is_dst = False, fill = False
bint shift_forward = False, shift_backward = False
bint fill_nonexist = False
str stamp
+ Localizer info = Localizer(tz)
# Vectorized version of DstTzInfo.localize
- if is_utc(tz) or tz is None:
+ if info.use_utc:
return vals.copy()
result = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
- if is_tzlocal(tz) or is_zoneinfo(tz):
+ if info.use_tzlocal:
for i in range(n):
v = vals[i]
if v == NPY_NAT:
@@ -214,9 +213,8 @@ timedelta-like}
result[i] = v - _tz_localize_using_tzinfo_api(v, tz, to_utc=True)
return result.base # to return underlying ndarray
- elif is_fixed_offset(tz):
- _, deltas, _ = get_dst_info(tz)
- delta = deltas[0]
+ elif info.use_fixed:
+ delta = info.delta
for i in range(n):
v = vals[i]
if v == NPY_NAT:
@@ -259,14 +257,9 @@ timedelta-like}
"shift_backwards} or a timedelta object")
raise ValueError(msg)
- trans, deltas, _ = get_dst_info(tz)
-
- tdata = <int64_t*>cnp.PyArray_DATA(trans)
- ntrans = trans.shape[0]
-
# Determine whether each date lies left of the DST transition (store in
# result_a) or right of the DST transition (store in result_b)
- result_a, result_b =_get_utc_bounds(vals, tdata, ntrans, deltas)
+ result_a, result_b =_get_utc_bounds(vals, info.tdata, info.ntrans, info.deltas)
# silence false-positive compiler warning
dst_hours = np.empty(0, dtype=np.int64)
@@ -278,7 +271,7 @@ timedelta-like}
# Shift the delta_idx by if the UTC offset of
# the target tz is greater than 0 and we're moving forward
# or vice versa
- first_delta = deltas[0]
+ first_delta = info.deltas[0]
if (shift_forward or shift_delta > 0) and first_delta > 0:
delta_idx_offset = 1
elif (shift_backward or shift_delta < 0) and first_delta < 0:
@@ -336,10 +329,10 @@ timedelta-like}
# nonexistent times
new_local = val - remaining_mins - 1
- delta_idx = bisect_right_i8(tdata, new_local, ntrans)
+ delta_idx = bisect_right_i8(info.tdata, new_local, info.ntrans)
delta_idx = delta_idx - delta_idx_offset
- result[i] = new_local - deltas[delta_idx]
+ result[i] = new_local - info.deltas[delta_idx]
elif fill_nonexist:
result[i] = NPY_NAT
else:
@@ -519,19 +512,7 @@ cdef ndarray[int64_t] _get_dst_hours(
# ----------------------------------------------------------------------
# Timezone Conversion
-def py_tz_convert_from_utc_single(int64_t utc_val, tzinfo tz):
- # The 'bint* fold=NULL' in tz_convert_from_utc_single means we cannot
- # make it cdef, so this is version exposed for testing from python.
- return tz_convert_from_utc_single(utc_val, tz)
-
-
-@cython.boundscheck(False)
-cdef int64_t tz_convert_from_utc_single(
- int64_t utc_val,
- tzinfo tz,
- bint* fold=NULL,
- Py_ssize_t* outpos=NULL,
-) except? -1:
+cpdef int64_t tz_convert_from_utc_single(int64_t utc_val, tzinfo tz) except? -1:
"""
Convert the val (in i8) from UTC to tz
@@ -541,8 +522,6 @@ cdef int64_t tz_convert_from_utc_single(
----------
utc_val : int64
tz : tzinfo
- fold : bint*, default NULL
- outpos : Py_ssize_t*, default NULL
Returns
-------
@@ -552,13 +531,8 @@ cdef int64_t tz_convert_from_utc_single(
Localizer info = Localizer(tz)
Py_ssize_t pos
- if utc_val == NPY_NAT:
- return utc_val
-
- if outpos is not NULL and info.use_pytz:
- return info.utc_val_to_local_val(utc_val, outpos, fold)
- else:
- return info.utc_val_to_local_val(utc_val, &pos, fold)
+ # Note: caller is responsible for ensuring utc_val != NPY_NAT
+ return info.utc_val_to_local_val(utc_val, &pos)
# OSError may be thrown by tzlocal on windows at or close to 1970-01-01
diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py
index 99be0e63d58e2..ade5a2077767f 100644
--- a/pandas/tests/tslibs/test_conversion.py
+++ b/pandas/tests/tslibs/test_conversion.py
@@ -22,7 +22,7 @@
def _compare_utc_to_local(tz_didx):
def f(x):
- return tzconversion.py_tz_convert_from_utc_single(x, tz_didx.tz)
+ return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz)
result = tz_convert_from_utc(tz_didx.asi8, tz_didx.tz)
expected = np.vectorize(f)(tz_didx.asi8)
| Broken off branch implementing tzconversion for non-nano | https://api.github.com/repos/pandas-dev/pandas/pulls/47019 | 2022-05-13T20:54:57Z | 2022-05-15T02:55:22Z | 2022-05-15T02:55:22Z | 2022-05-15T16:10:55Z |
REF: merge datetime_to_datetime64 into array_to_datetime | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 9492888e7db77..e6bbf52ab1272 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -59,6 +59,7 @@ from pandas._libs.tslibs.nattype cimport (
c_nat_strings as nat_strings,
)
from pandas._libs.tslibs.timestamps cimport _Timestamp
+from pandas._libs.tslibs.timezones cimport tz_compare
from pandas._libs.tslibs import (
Resolution,
@@ -447,6 +448,7 @@ cpdef array_to_datetime(
bint string_to_dts_failed
datetime py_dt
tzinfo tz_out = None
+ bint found_tz = False, found_naive = False
# specify error conditions
assert is_raise or is_ignore or is_coerce
@@ -465,18 +467,34 @@ cpdef array_to_datetime(
elif PyDateTime_Check(val):
seen_datetime = True
if val.tzinfo is not None:
+ found_tz = True
if utc_convert:
_ts = convert_datetime_to_tsobject(val, None)
iresult[i] = _ts.value
- else:
+ elif found_naive:
raise ValueError('Tz-aware datetime.datetime '
'cannot be converted to '
'datetime64 unless utc=True')
- elif isinstance(val, _Timestamp):
- iresult[i] = val.value
+ elif tz_out is not None and not tz_compare(tz_out, val.tzinfo):
+ raise ValueError('Tz-aware datetime.datetime '
+ 'cannot be converted to '
+ 'datetime64 unless utc=True')
+ else:
+ found_tz = True
+ tz_out = val.tzinfo
+ _ts = convert_datetime_to_tsobject(val, None)
+ iresult[i] = _ts.value
+
else:
- iresult[i] = pydatetime_to_dt64(val, &dts)
- check_dts_bounds(&dts)
+ found_naive = True
+ if found_tz:
+ raise ValueError('Cannot mix tz-aware with '
+ 'tz-naive values')
+ if isinstance(val, _Timestamp):
+ iresult[i] = val.value
+ else:
+ iresult[i] = pydatetime_to_dt64(val, &dts)
+ check_dts_bounds(&dts)
elif PyDate_Check(val):
seen_datetime = True
diff --git a/pandas/_libs/tslibs/conversion.pyi b/pandas/_libs/tslibs/conversion.pyi
index ca6f301673f33..16fe853eef815 100644
--- a/pandas/_libs/tslibs/conversion.pyi
+++ b/pandas/_libs/tslibs/conversion.pyi
@@ -23,7 +23,4 @@ def ensure_timedelta64ns(
arr: np.ndarray, # np.ndarray[timedelta64[ANY]]
copy: bool = ...,
) -> np.ndarray: ... # np.ndarray[timedelta64ns]
-def datetime_to_datetime64(
- values: npt.NDArray[np.object_],
-) -> tuple[np.ndarray, tzinfo | None]: ... # (np.ndarray[dt64ns], _)
def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ...
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index e5217259a3648..5b7da7347a238 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -264,80 +264,6 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool = True):
return dt64_result.view(TD64NS_DTYPE)
-# ----------------------------------------------------------------------
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def datetime_to_datetime64(ndarray values):
- # ndarray[object], but can't declare object without ndim
- """
- Convert ndarray of datetime-like objects to int64 array representing
- nanosecond timestamps.
-
- Parameters
- ----------
- values : ndarray[object]
-
- Returns
- -------
- result : ndarray[datetime64ns]
- inferred_tz : tzinfo or None
- """
- cdef:
- Py_ssize_t i, n = values.size
- object val
- int64_t ival
- ndarray iresult # int64_t, but can't declare that without specifying ndim
- npy_datetimestruct dts
- _TSObject _ts
- bint found_naive = False
- tzinfo inferred_tz = None
-
- cnp.broadcast mi
-
- result = np.empty((<object>values).shape, dtype='M8[ns]')
- iresult = result.view('i8')
-
- mi = cnp.PyArray_MultiIterNew2(iresult, values)
- for i in range(n):
- # Analogous to: val = values[i]
- val = <object>(<PyObject**>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
-
- if checknull_with_nat(val):
- ival = NPY_NAT
- elif PyDateTime_Check(val):
- if val.tzinfo is not None:
- if found_naive:
- raise ValueError('Cannot mix tz-aware with '
- 'tz-naive values')
- if inferred_tz is not None:
- if not tz_compare(val.tzinfo, inferred_tz):
- raise ValueError('Array must be all same time zone')
- else:
- inferred_tz = val.tzinfo
-
- _ts = convert_datetime_to_tsobject(val, None)
- ival = _ts.value
- check_dts_bounds(&_ts.dts)
- else:
- found_naive = True
- if inferred_tz is not None:
- raise ValueError('Cannot mix tz-aware with '
- 'tz-naive values')
- ival = pydatetime_to_dt64(val, &dts)
- check_dts_bounds(&dts)
- else:
- raise TypeError(f'Unrecognized value type: {type(val)}')
-
- # Analogous to: iresult[i] = ival
- (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = ival
-
- cnp.PyArray_MultiIter_NEXT(mi)
-
- return result, inferred_tz
-
-
# ----------------------------------------------------------------------
# _TSObject Conversion
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index dadfad394b903..ec6da61bde6c6 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2263,14 +2263,6 @@ def objects_to_datetime64ns(
allow_mixed=allow_mixed,
)
result = result.reshape(data.shape, order=order)
- except ValueError as err:
- try:
- values, tz_parsed = conversion.datetime_to_datetime64(data)
- # If tzaware, these values represent unix timestamps, so we
- # return them as i8 to distinguish from wall times
- return values.view("i8"), tz_parsed
- except (ValueError, TypeError):
- raise err
except OverflowError as err:
# Exception is raised when a part of date is greater than 32 bit signed int
raise OutOfBoundsDatetime("Out of bounds nanosecond timestamp") from err
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index d231dc10d1004..d6dda373bdf92 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -24,7 +24,6 @@
OutOfBoundsDatetime,
Timedelta,
Timestamp,
- conversion,
iNaT,
nat_strings,
parsing,
@@ -41,6 +40,7 @@
ArrayLike,
DateTimeErrorChoices,
Timezone,
+ npt,
)
from pandas.util._exceptions import find_stack_level
@@ -467,8 +467,6 @@ def _array_strptime_with_fallback(
try:
result, timezones = array_strptime(arg, fmt, exact=exact, errors=errors)
- if "%Z" in fmt or "%z" in fmt:
- return _return_parsed_timezone_results(result, timezones, tz, name)
except OutOfBoundsDatetime:
if errors == "raise":
raise
@@ -494,6 +492,9 @@ def _array_strptime_with_fallback(
else:
# Indicates to the caller to fallback to objects_to_datetime64ns
return None
+ else:
+ if "%Z" in fmt or "%z" in fmt:
+ return _return_parsed_timezone_results(result, timezones, tz, name)
return _box_as_indexlike(result, utc=utc, name=name)
@@ -512,38 +513,28 @@ def _to_datetime_with_format(
Try parsing with the given format, returning None on failure.
"""
result = None
- try:
- # shortcut formatting here
- if fmt == "%Y%m%d":
- # pass orig_arg as float-dtype may have been converted to
- # datetime64[ns]
- orig_arg = ensure_object(orig_arg)
- try:
- # may return None without raising
- result = _attempt_YYYYMMDD(orig_arg, errors=errors)
- except (ValueError, TypeError, OutOfBoundsDatetime) as err:
- raise ValueError(
- "cannot convert the input to '%Y%m%d' date format"
- ) from err
- if result is not None:
- utc = tz == "utc"
- return _box_as_indexlike(result, utc=utc, name=name)
- # fallback
- res = _array_strptime_with_fallback(
- arg, name, tz, fmt, exact, errors, infer_datetime_format
- )
- return res
-
- except ValueError as err:
- # Fallback to try to convert datetime objects if timezone-aware
- # datetime objects are found without passing `utc=True`
+ # shortcut formatting here
+ if fmt == "%Y%m%d":
+ # pass orig_arg as float-dtype may have been converted to
+ # datetime64[ns]
+ orig_arg = ensure_object(orig_arg)
try:
- values, tz = conversion.datetime_to_datetime64(arg)
- dta = DatetimeArray(values, dtype=tz_to_dtype(tz))
- return DatetimeIndex._simple_new(dta, name=name)
- except (ValueError, TypeError):
- raise err
+ # may return None without raising
+ result = _attempt_YYYYMMDD(orig_arg, errors=errors)
+ except (ValueError, TypeError, OutOfBoundsDatetime) as err:
+ raise ValueError(
+ "cannot convert the input to '%Y%m%d' date format"
+ ) from err
+ if result is not None:
+ utc = tz == "utc"
+ return _box_as_indexlike(result, utc=utc, name=name)
+
+ # fallback
+ res = _array_strptime_with_fallback(
+ arg, name, tz, fmt, exact, errors, infer_datetime_format
+ )
+ return res
def _to_datetime_with_unit(arg, unit, name, tz, errors: str) -> Index:
@@ -1007,17 +998,6 @@ def to_datetime(
DatetimeIndex(['2020-01-01 01:00:00-01:00', '2020-01-01 02:00:00-01:00'],
dtype='datetime64[ns, pytz.FixedOffset(-60)]', freq=None)
- - Finally, mixing timezone-aware strings and :class:`datetime.datetime` always
- raises an error, even if the elements all have the same time offset.
-
- >>> from datetime import datetime, timezone, timedelta
- >>> d = datetime(2020, 1, 1, 18, tzinfo=timezone(-timedelta(hours=1)))
- >>> pd.to_datetime(["2020-01-01 17:00 -0100", d])
- Traceback (most recent call last):
- ...
- ValueError: Tz-aware datetime.datetime cannot be converted to datetime64
- unless utc=True
-
|
Setting ``utc=True`` solves most of the above issues:
@@ -1243,7 +1223,7 @@ def coerce(values):
return values
-def _attempt_YYYYMMDD(arg: np.ndarray, errors: str) -> np.ndarray | None:
+def _attempt_YYYYMMDD(arg: npt.NDArray[np.object_], errors: str) -> np.ndarray | None:
"""
try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is a passed in as an object dtype, but could really be ints/strings
@@ -1257,7 +1237,7 @@ def _attempt_YYYYMMDD(arg: np.ndarray, errors: str) -> np.ndarray | None:
def calc(carg):
# calculate the actual result
- carg = carg.astype(object)
+ carg = carg.astype(object, copy=False)
parsed = parsing.try_parse_year_month_day(
carg / 10000, carg / 100 % 100, carg % 100
)
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 7597d4345cfce..0bd93a78227ff 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -5,6 +5,7 @@
from datetime import (
datetime,
timedelta,
+ timezone,
)
from decimal import Decimal
import locale
@@ -455,6 +456,14 @@ def test_to_datetime_parse_timezone_keeps_name(self):
class TestToDatetime:
+ def test_to_datetime_mixed_datetime_and_string(self):
+ # GH#47018 adapted old doctest with new behavior
+ d1 = datetime(2020, 1, 1, 17, tzinfo=timezone(-timedelta(hours=1)))
+ d2 = datetime(2020, 1, 1, 18, tzinfo=timezone(-timedelta(hours=1)))
+ res = to_datetime(["2020-01-01 17:00 -0100", d2])
+ expected = to_datetime([d1, d2]).tz_convert(pytz.FixedOffset(-60))
+ tm.assert_index_equal(res, expected)
+
def test_to_datetime_np_str(self):
# GH#32264
value = np.str_("2019-02-04 10:18:46.297000+0000")
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47018 | 2022-05-13T20:51:26Z | 2022-05-21T19:47:14Z | 2022-05-21T19:47:14Z | 2022-05-21T21:53:29Z |
BUG: Decay args constraint not enforced when times was provided | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 9f1c4755bc54f..0f1b71c690ce0 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -150,6 +150,7 @@ Other enhancements
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
- Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`)
+- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.notable_bug_fixes:
@@ -736,6 +737,7 @@ Groupby/resample/rolling
- Bug in :meth:`.Rolling.var` would segfault calculating weighted variance when window size was larger than data size (:issue:`46760`)
- Bug in :meth:`Grouper.__repr__` where ``dropna`` was not included. Now it is (:issue:`46754`)
- Bug in :meth:`DataFrame.rolling` gives ValueError when center=True, axis=1 and win_type is specified (:issue:`46135`)
+- Bug in :class:`.ExponentialMovingWindow` where ``alpha``, ``com``, or ``span`` were incorrectly allowed when ``times`` and ``halflife`` were passed (:issue:`47003`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 32cb4938344c4..8a6f7b4b798ee 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -389,22 +389,20 @@ def __init__(
raise ValueError("times must be datetime64[ns] dtype.")
if len(self.times) != len(obj):
raise ValueError("times must be the same length as the object.")
- if not isinstance(self.halflife, (str, datetime.timedelta)):
+ if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)):
raise ValueError(
"halflife must be a string or datetime.timedelta object"
)
if isna(self.times).any():
raise ValueError("Cannot convert NaT values to integer")
self._deltas = _calculate_deltas(self.times, self.halflife)
- # Halflife is no longer applicable when calculating COM
- # But allow COM to still be calculated if the user passes other decay args
- if common.count_not_none(self.com, self.span, self.alpha) > 0:
- self._com = get_center_of_mass(self.com, self.span, None, self.alpha)
- else:
- self._com = 1.0
+ # GH 47003
+ # get_center_of_mass will validate and raise if the user has also
+ # passed in com, span or alpha (1.0 is a placeholder value)
+ self._com = get_center_of_mass(self.com, self.span, 1.0, self.alpha)
else:
if self.halflife is not None and isinstance(
- self.halflife, (str, datetime.timedelta)
+ self.halflife, (str, datetime.timedelta, np.timedelta64)
):
raise ValueError(
"halflife can only be a timedelta convertible argument if "
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index f42a1a5449c5c..8977d1a0d9d1b 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -102,7 +102,7 @@ def engine_and_raw(request):
return request.param
-@pytest.fixture(params=["1 day", timedelta(days=1)])
+@pytest.fixture(params=["1 day", timedelta(days=1), np.timedelta64(1, "D")])
def halflife_with_times(request):
"""Halflife argument for EWM when times is specified."""
return request.param
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index b1e8b43258750..9acc34c67243a 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -666,3 +666,11 @@ def test_ewm_pairwise_cov_corr(func, frame):
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
+
+
+@pytest.mark.parametrize("decay", ["alpha", "com", "span"])
+def test_validate_times_halflife_with_other_decay(decay):
+ ser = Series([1, 2])
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
+ ser.ewm(**{decay: 1}, halflife="1 Day", times=DatetimeIndex(["2021", "2022"]))
| - [x] closes #47003 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47017 | 2022-05-13T20:09:00Z | 2022-05-15T01:34:35Z | null | 2022-05-15T01:34:38Z |
Unpin xarray | diff --git a/environment.yml b/environment.yml
index b4710e252384c..df69e654018fb 100644
--- a/environment.yml
+++ b/environment.yml
@@ -116,7 +116,7 @@ dependencies:
- fsspec>=0.7.4 # for generic remote file operations
- gcsfs>=0.6.0 # file IO when using 'gcs://...' path
- sqlalchemy # pandas.read_sql, DataFrame.to_sql
- - xarray<0.19 # DataFrame.to_xarray
+ - xarray # DataFrame.to_xarray
- cftime # Needed for downstream xarray.CFTimeIndex test
- pyreadstat # pandas.read_spss
- tabulate>=0.8.3 # DataFrame.to_markdown
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e1459a66a0f12..24c3bcb7bf669 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3074,7 +3074,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
>>> df_multiindex.to_xarray()
<xarray.Dataset>
- Dimensions: (animal: 2, date: 2)
+ Dimensions: (date: 2, animal: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 0f1d76b996df1..36d3720df34d8 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -80,7 +80,7 @@ aiobotocore<2.0.0
fsspec>=0.7.4
gcsfs>=0.6.0
sqlalchemy
-xarray<0.19
+xarray
cftime
pyreadstat
tabulate>=0.8.3
| - [X] closes [#42716](https://github.com/pandas-dev/pandas/issues/42716)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47016 | 2022-05-13T20:02:44Z | 2022-05-16T09:23:03Z | 2022-05-16T09:23:02Z | 2022-05-16T09:23:03Z |
CI: Ensure no-use-pep517 with no-build-isolation with new pip version | diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 483353cfcb3cd..80448319f7918 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -104,6 +104,6 @@ echo "Build extensions"
python setup.py build_ext -q -j3
echo "Install pandas"
-python -m pip install --no-build-isolation -e .
+python -m pip install --no-build-isolation --no-use-pep517 -e .
echo "done"
| xref https://github.com/scipy/oldest-supported-numpy/issues/53
We may even be able to replace no-build-isolation with no-use-pep517 https://github.com/scikit-learn/scikit-learn/pull/23339 but that could be a followup. | https://api.github.com/repos/pandas-dev/pandas/pulls/47015 | 2022-05-13T16:21:34Z | 2022-05-15T13:49:48Z | 2022-05-15T13:49:48Z | 2022-05-15T13:54:31Z |
DOC: pyarrow >= 8.0.0 supports timedelta | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 4ed71913d7b4d..5228ef9f3c5b6 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -5470,7 +5470,7 @@ See the documentation for `pyarrow <https://arrow.apache.org/docs/python/>`__ an
.. note::
These engines are very similar and should read/write nearly identical parquet format files.
- Currently ``pyarrow`` does not support timedelta data, ``fastparquet>=0.1.4`` supports timezone aware datetimes.
+ ``pyarrow>=8.0.0`` supports timedelta data, ``fastparquet>=0.1.4`` supports timezone aware datetimes.
These libraries differ by having different underlying dependencies (``fastparquet`` by using ``numba``, while ``pyarrow`` uses a c-library).
.. ipython:: python
| 8.0.0 was released recently with timedelta support: https://arrow.apache.org/release/8.0.0.html, https://issues.apache.org/jira/browse/ARROW-6780
| https://api.github.com/repos/pandas-dev/pandas/pulls/47012 | 2022-05-13T11:30:54Z | 2022-05-15T02:57:45Z | 2022-05-15T02:57:45Z | 2022-05-15T02:57:50Z |
TYP: resolve mypy ingores in core/indexing.py | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 3418f1cab0e6f..06b93622d3ca6 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -627,7 +627,7 @@ def iat(self) -> _iAtIndexer:
class _LocationIndexer(NDFrameIndexerBase):
_valid_types: str
- axis = None
+ axis: int | None = None
@final
def __call__(self, axis=None):
@@ -652,7 +652,7 @@ def _get_setitem_indexer(self, key):
check_deprecated_indexers(x)
if self.axis is not None:
- key = self._tupleize_axis_indexer(key)
+ key = _tupleize_axis_indexer(self.ndim, self.axis, key)
ax = self.obj._get_axis(0)
@@ -737,17 +737,6 @@ def _maybe_mask_setitem_value(self, indexer, value):
return indexer, value
- @final
- def _tupleize_axis_indexer(self, key) -> tuple:
- """
- If we have an axis, adapt the given key to be axis-independent.
- """
- new_key = [slice(None)] * self.ndim
- # error: Invalid index type "Optional[Any]" for "List[slice]"; expected
- # type "SupportsIndex"
- new_key[self.axis] = key # type: ignore[index]
- return tuple(new_key)
-
@final
def _ensure_listlike_indexer(self, key, axis=None, value=None):
"""
@@ -1621,7 +1610,7 @@ def _get_setitem_indexer(self, key):
key = list(key)
if self.axis is not None:
- key = self._tupleize_axis_indexer(key)
+ key = _tupleize_axis_indexer(self.ndim, self.axis, key)
return key
@@ -2137,13 +2126,11 @@ def _ensure_iterable_column_indexer(self, column_indexer):
"""
Ensure that our column indexer is something that can be iterated over.
"""
- ilocs: Sequence[int]
+ ilocs: Sequence[int] | np.ndarray
if is_integer(column_indexer):
ilocs = [column_indexer]
elif isinstance(column_indexer, slice):
- ilocs = np.arange(len(self.obj.columns))[ # type: ignore[assignment]
- column_indexer
- ]
+ ilocs = np.arange(len(self.obj.columns))[column_indexer]
elif isinstance(column_indexer, np.ndarray) and is_bool_dtype(
column_indexer.dtype
):
@@ -2201,18 +2188,16 @@ def ravel(i):
# TODO: This is hacky, align Series and DataFrame behavior GH#45778
if obj.ndim == 2 and is_empty_indexer(indexer[0]):
return ser._values.copy()
- ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
+ ser_values = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
len_indexer = len(indexer[1])
- ser = (
- np.tile(ser, len_indexer) # type: ignore[assignment]
- .reshape(len_indexer, -1)
- .T
+ ser_values = (
+ np.tile(ser_values, len_indexer).reshape(len_indexer, -1).T
)
- return ser
+ return ser_values
for i, idx in enumerate(indexer):
ax = obj.axes[i]
@@ -2428,6 +2413,15 @@ def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]:
return tuple(_tup)
+def _tupleize_axis_indexer(ndim: int, axis: int, key) -> tuple:
+ """
+ If we have an axis, adapt the given key to be axis-independent.
+ """
+ new_key = [slice(None)] * ndim
+ new_key[axis] = key
+ return tuple(new_key)
+
+
def convert_to_index_sliceable(obj: DataFrame, key):
"""
If we are index sliceable, then return my slicer, otherwise return None.
| xref https://github.com/pandas-dev/pandas/issues/37715
Notes: `axis` has to be passed as an argument to `_tupleize_axis_indexer` in order to assert that it is not `None`. Since this kind of decouples it from the `_LocationIndexer` class, I made it a standalone utility function. | https://api.github.com/repos/pandas-dev/pandas/pulls/47010 | 2022-05-12T22:25:21Z | 2022-05-25T22:45:24Z | 2022-05-25T22:45:24Z | 2022-05-25T22:45:31Z |
Backport PR #46981 on branch 1.4.x (CI: Move MacOS build from Azure to GHA) | diff --git a/.github/workflows/windows.yml b/.github/workflows/macos-windows.yml
similarity index 70%
rename from .github/workflows/windows.yml
rename to .github/workflows/macos-windows.yml
index 6f267357554a3..560a421ec74ec 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/macos-windows.yml
@@ -1,4 +1,4 @@
-name: Windows
+name: Windows-MacOS
on:
push:
@@ -21,18 +21,20 @@ env:
jobs:
pytest:
- runs-on: windows-latest
defaults:
run:
shell: bash -el {0}
timeout-minutes: 90
strategy:
matrix:
+ os: [macos-latest, windows-latest]
env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
fail-fast: false
+ runs-on: ${{ matrix.os }}
+ name: ${{ format('{0} {1}', matrix.os, matrix.env_file) }}
concurrency:
# https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.os }}
cancel-in-progress: true
steps:
@@ -47,10 +49,17 @@ jobs:
mamba-version: "*"
channels: conda-forge
activate-environment: pandas-dev
- channel-priority: strict
+ channel-priority: ${{ matrix.os == 'macos-latest' && 'flexible' || 'strict' }}
environment-file: ci/deps/${{ matrix.env_file }}
use-only-tar-bz2: true
+ # ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib
+ # Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib
+ # Reason: image not found
+ - name: Upgrade pyarrow on MacOS
+ run: conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6
+ if: ${{ matrix.os == 'macos-latest' }}
+
- name: Build Pandas
uses: ./.github/actions/build_pandas
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 0b2a9f5b2b0cd..0c6195ff6924b 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -22,11 +22,6 @@ variables:
PANDAS_CI: 1
jobs:
-- template: ci/azure/posix.yml
- parameters:
- name: macOS
- vmImage: macOS-10.15
-
- job: py38_32bit
pool:
vmImage: ubuntu-18.04
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
deleted file mode 100644
index df1d5049be33d..0000000000000
--- a/ci/azure/posix.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-parameters:
- name: ''
- vmImage: ''
-
-jobs:
-- job: ${{ parameters.name }}
- timeoutInMinutes: 90
- pool:
- vmImage: ${{ parameters.vmImage }}
- strategy:
- matrix:
- py38:
- ENV_FILE: ci/deps/actions-38.yaml
- CONDA_PY: "38"
-
- py39:
- ENV_FILE: ci/deps/actions-39.yaml
- CONDA_PY: "39"
-
- py310:
- ENV_FILE: ci/deps/actions-310.yaml
- CONDA_PY: "310"
-
- steps:
- - script: echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
- displayName: 'Set conda path'
-
- - script: rm /usr/local/miniconda/pkgs/cache/*.json
- displayName: 'Workaround for mamba-org/mamba#488'
-
- - script: ci/setup_env.sh
- displayName: 'Setup environment and build pandas'
-
- - script: |
- conda run -n pandas-dev --no-capture-output ci/run_tests.sh
- displayName: 'Test'
-
- - script: |
- pushd /tmp
- conda run -n pandas-dev python -c "import pandas; pandas.show_versions()"
- popd
- displayName: 'Build versions'
-
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- failTaskOnFailedTests: true
- testResultsFiles: 'test-data.xml'
- testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
- displayName: 'Publish test results'
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index a85767eb6f1b4..483353cfcb3cd 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -73,15 +73,6 @@ mamba install -n pandas-dev 'setuptools<60'
echo "conda list -n pandas-dev"
conda list -n pandas-dev
-# From pyarrow on MacOS
-# ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib
-# Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib
-# Reason: image not found
-if [[ "$(uname)" == 'Darwin' ]]; then
- echo "Update pyarrow for pyarrow on MacOS"
- conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6
-fi
-
if [[ "$BITS32" == "yes" ]]; then
# activate 32-bit compiler
export CONDA_BUILD=1
| Backport PR #46981: CI: Move MacOS build from Azure to GHA | https://api.github.com/repos/pandas-dev/pandas/pulls/47007 | 2022-05-12T15:55:05Z | 2022-05-15T04:32:06Z | 2022-05-15T04:32:06Z | 2022-05-15T04:32:06Z |
Simplify Timedelta init, standardize overflow errors | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 4eb1494c4d56c..3ce75d09f78e8 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1,4 +1,5 @@
import collections
+import operator
import warnings
cimport cython
@@ -55,6 +56,7 @@ from pandas._libs.tslibs.np_datetime cimport (
pandas_timedelta_to_timedeltastruct,
pandas_timedeltastruct,
)
+from pandas._libs.util cimport INT64_MAX
from pandas._libs.tslibs.np_datetime import OutOfBoundsTimedelta
@@ -217,12 +219,11 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
+ delta.microseconds
) * 1000
except OverflowError as err:
- raise OutOfBoundsTimedelta(*err.args) from err
-
+ msg = f"{delta} outside allowed range [{NPY_NAT + 1}ns, {INT64_MAX}ns]"
+ raise OutOfBoundsTimedelta(msg) from err
raise TypeError(type(delta))
-@cython.overflowcheck(True)
cdef object ensure_td64ns(object ts):
"""
Overflow-safe implementation of td64.astype("m8[ns]")
@@ -241,24 +242,20 @@ cdef object ensure_td64ns(object ts):
str unitstr
td64_unit = get_datetime64_unit(ts)
- if (
- td64_unit != NPY_DATETIMEUNIT.NPY_FR_ns
- and td64_unit != NPY_DATETIMEUNIT.NPY_FR_GENERIC
- ):
- unitstr = npy_unit_to_abbrev(td64_unit)
+ if td64_unit == NPY_DATETIMEUNIT.NPY_FR_ns or td64_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC:
+ return ts
- td64_value = get_timedelta64_value(ts)
+ unitstr = npy_unit_to_abbrev(td64_unit)
+ mult = precision_from_unit(unitstr)[0]
- mult = precision_from_unit(unitstr)[0]
+ with cython.overflowcheck(True):
try:
- # NB: cython#1381 this cannot be *=
- td64_value = td64_value * mult
+ td64_value = get_timedelta64_value(ts) * mult
except OverflowError as err:
- raise OutOfBoundsTimedelta(ts) from err
+ msg = f"{str(ts)} outside allowed range [{NPY_NAT + 1}ns, {INT64_MAX}ns]"
+ raise OutOfBoundsTimedelta(msg) from err
- return np.timedelta64(td64_value, "ns")
-
- return ts
+ return np.timedelta64(td64_value, "ns")
cdef convert_to_timedelta64(object ts, str unit):
@@ -674,8 +671,7 @@ cdef bint _validate_ops_compat(other):
def _op_unary_method(func, name):
def f(self):
- new_value = func(self.value)
- return _timedelta_from_value_and_reso(new_value, self._reso)
+ return create_timedelta(func(self.value), "ignore", self._reso)
f.__name__ = name
return f
@@ -724,13 +720,7 @@ def _binary_op_method_timedeltalike(op, name):
if self._reso != other._reso:
raise NotImplementedError
- res = op(self.value, other.value)
- if res == NPY_NAT:
- # e.g. test_implementation_limits
- # TODO: more generally could do an overflowcheck in op?
- return NaT
-
- return _timedelta_from_value_and_reso(res, reso=self._reso)
+ return create_timedelta(op(self.value, other.value), "ignore", self._reso)
f.__name__ = name
return f
@@ -861,7 +851,7 @@ cdef _to_py_int_float(v):
def _timedelta_unpickle(value, reso):
- return _timedelta_from_value_and_reso(value, reso)
+ return create_timedelta(value, "ignore", reso)
cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso):
@@ -892,6 +882,68 @@ cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso):
return td_base
+@cython.overflowcheck(True)
+cdef object create_timedelta(object value, str in_unit, NPY_DATETIMEUNIT out_reso):
+ """
+ Timedelta factory.
+
+ Overflow-safe, and allows for the creation of Timedeltas with non-nano resos while
+ the public API for that gets hashed out (ref: GH#46587). For now, Timedelta.__new__
+ just does arg validation and kwarg processing.
+
+ _timedelta_from_value_and_reso faster if value already an int that can be safely
+ cast to an int64.
+
+ Parameters
+ ----------
+ value : Timedelta, timedelta, np.timedelta64, str, int, float
+ The same value types accepted by Timedelta.__new__
+ in_unit : str
+ Denote the (np) unit of the input, if it's numeric
+ out_reso: NPY_DATETIMEUNIT
+ Desired resolution of new Timedelta
+
+ Notes
+ -----
+ Pass in_unit="ignore" (or "ns") with a numeric value to just do overflow checking
+ (and bypass the prior behavior of converting value -> td64[ns] -> int)
+ """
+ cdef:
+ int64_t out_value
+
+ if isinstance(value, _Timedelta):
+ return value
+
+ try:
+ # if unit == "ns", no need to create an m8[ns] just to read the (same) value back
+ # if unit == "ignore", assume caller wants to invoke an overflow-safe version of
+ # _timedelta_from_value_and_reso, and that any float rounding is acceptable
+ if (is_integer_object(value) or is_float_object(value)) and (
+ in_unit == "ns" or in_unit == "ignore"
+ ):
+ if util.is_nan(value):
+ return NaT
+ out_value = <int64_t>value
+ # is_timedelta_64_object may not give correct results w/ some versions? see e.g.
+ # github.com/pandas-dev/pandas/runs/6397652653?check_suite_focus=true#step:11:435
+ elif isinstance(value, np.timedelta64):
+ out_value = ensure_td64ns(value).view(np.int64)
+ elif isinstance(value, str):
+ if value.startswith(("P", "-P")):
+ out_value = parse_iso_format_string(value)
+ else:
+ out_value = parse_timedelta_string(value)
+ else:
+ out_value = convert_to_timedelta64(value, in_unit).view(np.int64)
+ except OverflowError as err:
+ msg = f"{value} outside allowed range [{NPY_NAT + 1}ns, {INT64_MAX}ns]"
+ raise OutOfBoundsTimedelta(msg) from err
+
+ if out_value == NPY_NAT:
+ return NaT
+ return _timedelta_from_value_and_reso(out_value, out_reso)
+
+
# Similar to Timestamp/datetime, this is a construction requirement for
# timedeltas that we need to do object instantiation in python. This will
# serve as a C extension type that shadows the Python class, where we do any
@@ -1375,7 +1427,7 @@ cdef class _Timedelta(timedelta):
@classmethod
def _from_value_and_reso(cls, int64_t value, NPY_DATETIMEUNIT reso):
# exposing as classmethod for testing
- return _timedelta_from_value_and_reso(value, reso)
+ return create_timedelta(value, "ignore", reso)
# Python front end to C extension type _Timedelta
@@ -1438,99 +1490,49 @@ class Timedelta(_Timedelta):
We see that either way we get the same result
"""
- _req_any_kwargs_new = {"weeks", "days", "hours", "minutes", "seconds",
- "milliseconds", "microseconds", "nanoseconds"}
+ _allowed_kwargs = (
+ "weeks", "days", "hours", "minutes", "seconds", "milliseconds", "microseconds",
+ "nanoseconds"
+ )
def __new__(cls, object value=_no_input, unit=None, **kwargs):
- cdef _Timedelta td_base
+ cdef:
+ NPY_DATETIMEUNIT out_reso = NPY_FR_ns
+ # process kwargs iff no value passed
if value is _no_input:
- if not len(kwargs):
- raise ValueError("cannot construct a Timedelta without a "
- "value/unit or descriptive keywords "
- "(days,seconds....)")
-
- kwargs = {key: _to_py_int_float(kwargs[key]) for key in kwargs}
-
- unsupported_kwargs = set(kwargs)
- unsupported_kwargs.difference_update(cls._req_any_kwargs_new)
- if unsupported_kwargs or not cls._req_any_kwargs_new.intersection(kwargs):
+ if not kwargs:
+ raise ValueError(
+ "cannot construct a Timedelta without a value/unit "
+ "or descriptive keywords (days,seconds....)"
+ )
+ if not kwargs.keys() <= set(cls._allowed_kwargs):
raise ValueError(
"cannot construct a Timedelta from the passed arguments, "
- "allowed keywords are "
- "[weeks, days, hours, minutes, seconds, "
- "milliseconds, microseconds, nanoseconds]"
+ f"allowed keywords are {cls._allowed_kwargs}"
)
-
- # GH43764, convert any input to nanoseconds first and then
- # create the timestamp. This ensures that any potential
- # nanosecond contributions from kwargs parsed as floats
- # are taken into consideration.
- seconds = int((
- (
- (kwargs.get('days', 0) + kwargs.get('weeks', 0) * 7) * 24
- + kwargs.get('hours', 0)
- ) * 3600
- + kwargs.get('minutes', 0) * 60
- + kwargs.get('seconds', 0)
- ) * 1_000_000_000
- )
-
- value = np.timedelta64(
- int(kwargs.get('nanoseconds', 0))
- + int(kwargs.get('microseconds', 0) * 1_000)
- + int(kwargs.get('milliseconds', 0) * 1_000_000)
- + seconds
+ # GH43764, convert any input to nanoseconds first, to ensure any potential
+ # nanosecond contributions from kwargs parsed as floats are included
+ ns = (
+ _to_py_int_float(kwargs.get("weeks", 0)) * 7 * 24 * 3600 * 1_000_000_000
+ + _to_py_int_float(kwargs.get("days", 0)) * 24 * 3600 * 1_000_000_000
+ + _to_py_int_float(kwargs.get("hours", 0)) * 3600 * 1_000_000_000
+ + _to_py_int_float(kwargs.get("minutes", 0)) * 60 * 1_000_000_000
+ + _to_py_int_float(kwargs.get("seconds", 0)) * 1_000_000_000
+ + _to_py_int_float(kwargs.get("milliseconds", 0)) * 1_000_000
+ + _to_py_int_float(kwargs.get("microseconds", 0)) * 1_000
+ + _to_py_int_float(kwargs.get("nanoseconds", 0))
)
+ return create_timedelta(ns, "ns", out_reso)
- if unit in {'Y', 'y', 'M'}:
+ if isinstance(value, str) and unit is not None:
+ raise ValueError("unit must not be specified if the value is a str")
+ elif unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
-
- # GH 30543 if pd.Timedelta already passed, return it
- # check that only value is passed
- if isinstance(value, _Timedelta) and unit is None and len(kwargs) == 0:
- return value
- elif isinstance(value, _Timedelta):
- value = value.value
- elif isinstance(value, str):
- if unit is not None:
- raise ValueError("unit must not be specified if the value is a str")
- if (len(value) > 0 and value[0] == 'P') or (
- len(value) > 1 and value[:2] == '-P'
- ):
- value = parse_iso_format_string(value)
- else:
- value = parse_timedelta_string(value)
- value = np.timedelta64(value)
- elif PyDelta_Check(value):
- value = convert_to_timedelta64(value, 'ns')
- elif is_timedelta64_object(value):
- value = ensure_td64ns(value)
- elif is_tick_object(value):
- value = np.timedelta64(value.nanos, 'ns')
- elif is_integer_object(value) or is_float_object(value):
- # unit=None is de-facto 'ns'
- unit = parse_timedelta_unit(unit)
- value = convert_to_timedelta64(value, unit)
- elif checknull_with_nat(value):
- return NaT
- else:
- raise ValueError(
- "Value must be Timedelta, string, integer, "
- f"float, timedelta or convertible, not {type(value).__name__}"
- )
-
- if is_timedelta64_object(value):
- value = value.view('i8')
-
- # nat
- if value == NPY_NAT:
- return NaT
-
- return _timedelta_from_value_and_reso(value, NPY_FR_ns)
+ return create_timedelta(value, parse_timedelta_unit(unit), out_reso)
def __setstate__(self, state):
if len(state) == 1:
@@ -1607,30 +1609,25 @@ class Timedelta(_Timedelta):
# Arithmetic Methods
# TODO: Can some of these be defined in the cython class?
- __neg__ = _op_unary_method(lambda x: -x, '__neg__')
- __pos__ = _op_unary_method(lambda x: x, '__pos__')
- __abs__ = _op_unary_method(lambda x: abs(x), '__abs__')
+ __neg__ = _op_unary_method(operator.neg, "__neg__")
+ __pos__ = _op_unary_method(operator.pos, "__pos__")
+ __abs__ = _op_unary_method(operator.abs, "__abs__")
- __add__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__add__')
- __radd__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__radd__')
- __sub__ = _binary_op_method_timedeltalike(lambda x, y: x - y, '__sub__')
- __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, '__rsub__')
+ __add__ = _binary_op_method_timedeltalike(operator.add, "__add__")
+ __radd__ = _binary_op_method_timedeltalike(operator.add, "__radd__")
+ __sub__ = _binary_op_method_timedeltalike(operator.sub, "__sub__")
+ __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, "__rsub__")
def __mul__(self, other):
- if is_integer_object(other) or is_float_object(other):
- if util.is_nan(other):
- # np.nan * timedelta -> np.timedelta64("NaT"), in this case NaT
- return NaT
-
- return _timedelta_from_value_and_reso(
- <int64_t>(other * self.value),
- reso=self._reso,
- )
-
- elif is_array(other):
+ if util.is_nan(other):
+ # np.nan * timedelta -> np.timedelta64("NaT"), in this case NaT
+ return NaT
+ if is_array(other):
# ndarray-like
return other * self.to_timedelta64()
-
+ if is_integer_object(other) or is_float_object(other):
+ # can't call Timedelta b/c it doesn't (yet) expose reso
+ return create_timedelta(self.value * other, "ignore", self._reso)
return NotImplemented
__rmul__ = __mul__
@@ -1825,6 +1822,6 @@ cdef _broadcast_floordiv_td64(
# resolution in ns
-Timedelta.min = Timedelta(np.iinfo(np.int64).min + 1)
-Timedelta.max = Timedelta(np.iinfo(np.int64).max)
+Timedelta.min = Timedelta(NPY_NAT + 1)
+Timedelta.max = Timedelta(INT64_MAX)
Timedelta.resolution = Timedelta(nanoseconds=1)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index abdb4aebb625f..c25a9d08c60b8 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -90,7 +90,10 @@ from pandas._libs.tslibs.np_datetime cimport (
pydatetime_to_dt64,
)
-from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
+from pandas._libs.tslibs.np_datetime import (
+ OutOfBoundsDatetime,
+ OutOfBoundsTimedelta,
+)
from pandas._libs.tslibs.offsets cimport (
BaseOffset,
@@ -435,14 +438,13 @@ cdef class _Timestamp(ABCTimestamp):
# Timedelta
try:
return Timedelta(self.value - other.value)
- except (OverflowError, OutOfBoundsDatetime) as err:
- if isinstance(other, _Timestamp):
- if both_timestamps:
- raise OutOfBoundsDatetime(
- "Result is too large for pandas.Timedelta. Convert inputs "
- "to datetime.datetime with 'Timestamp.to_pydatetime()' "
- "before subtracting."
- ) from err
+ except OutOfBoundsTimedelta as err:
+ if both_timestamps:
+ raise OutOfBoundsTimedelta(
+ "Result is too large for pandas.Timedelta. Convert inputs "
+ "to datetime.datetime with 'Timestamp.to_pydatetime()' "
+ "before subtracting."
+ ) from err
# We get here in stata tests, fall back to stdlib datetime
# method and return stdlib timedelta object
pass
@@ -461,7 +463,7 @@ cdef class _Timestamp(ABCTimestamp):
if PyDateTime_Check(other):
try:
return type(self)(other) - self
- except (OverflowError, OutOfBoundsDatetime) as err:
+ except (OverflowError, OutOfBoundsDatetime, OutOfBoundsTimedelta) as err:
# We get here in stata tests, fall back to stdlib datetime
# method and return stdlib timedelta object
pass
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 74aa7f045088e..955dc86285a20 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -22,6 +22,10 @@
import pandas._testing as tm
from pandas.core import ops
+TD_OVERFLOW_MSG = (
+ r"outside allowed range \[-9223372036854775807ns, 9223372036854775807ns\]"
+)
+
class TestTimedeltaAdditionSubtraction:
"""
@@ -99,11 +103,10 @@ def test_td_add_datetimelike_scalar(self, op):
assert result is NaT
def test_td_add_timestamp_overflow(self):
- msg = "int too (large|big) to convert"
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timestamp("1700-01-01") + Timedelta(13 * 19999, unit="D")
- with pytest.raises(OutOfBoundsTimedelta, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timestamp("1700-01-01") + timedelta(days=13 * 19999)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index 7fc7bd3a5a74d..92937854679d0 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -12,19 +12,20 @@
to_timedelta,
)
+TD_OVERFLOW_MSG = (
+ r"outside allowed range \[-9223372036854775807ns, 9223372036854775807ns\]"
+)
+
def test_construct_from_td64_with_unit():
# ignore the unit, as it may cause silently overflows leading to incorrect
# results, and in non-overflow cases is irrelevant GH#46827
obj = np.timedelta64(123456789, "h")
- with pytest.raises(OutOfBoundsTimedelta, match="123456789 hours"):
- Timedelta(obj, unit="ps")
-
- with pytest.raises(OutOfBoundsTimedelta, match="123456789 hours"):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(obj, unit="ns")
- with pytest.raises(OutOfBoundsTimedelta, match="123456789 hours"):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(obj)
@@ -204,18 +205,16 @@ def test_td_from_repr_roundtrip(val):
def test_overflow_on_construction():
- msg = "int too (large|big) to convert"
-
# GH#3374
value = Timedelta("1day").value * 20169940
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(value)
# xref GH#17637
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(7 * 19999, unit="D")
- with pytest.raises(OutOfBoundsTimedelta, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(timedelta(days=13 * 19999))
@@ -238,8 +237,7 @@ def test_construction_out_of_bounds_td64(val, unit, name):
td64 = np.timedelta64(val, unit)
assert td64.astype("m8[ns]").view("i8") < 0 # i.e. naive astype will be wrong
- msg = str(val) + name
- with pytest.raises(OutOfBoundsTimedelta, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(td64)
# But just back in bounds and we are OK
@@ -248,7 +246,7 @@ def test_construction_out_of_bounds_td64(val, unit, name):
td64 *= -1
assert td64.astype("m8[ns]").view("i8") > 0 # i.e. naive astype will be wrong
- with pytest.raises(OutOfBoundsTimedelta, match="-" + msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(td64)
# But just back in bounds and we are OK
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index cf7211e82b799..87550022b414f 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -13,6 +13,7 @@
NaT,
iNaT,
)
+from pandas._libs.tslibs.np_datetime import OutOfBoundsTimedelta
import pandas as pd
from pandas import (
@@ -23,6 +24,10 @@
)
import pandas._testing as tm
+TD_OVERFLOW_MSG = (
+ r"outside allowed range \[-9223372036854775807ns, 9223372036854775807ns\]"
+)
+
class TestNonNano:
@pytest.fixture(params=[7, 8, 9])
@@ -658,21 +663,20 @@ def test_implementation_limits(self):
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, "ns")) is NaT
- msg = "int too (large|big) to convert"
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
min_td - Timedelta(2, "ns")
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
max_td + Timedelta(1, "ns")
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, "ns")
assert td is NaT
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(min_td.value - 2, "ns")
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
Timedelta(max_td.value + 1, "ns")
def test_total_seconds_precision(self):
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index b46962fb82896..5475b0d9042e1 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -8,7 +8,7 @@
import pytest
from pandas._libs.tslibs import (
- OutOfBoundsDatetime,
+ OutOfBoundsTimedelta,
Timedelta,
Timestamp,
offsets,
@@ -17,6 +17,10 @@
import pandas._testing as tm
+TD_OVERFLOW_MSG = (
+ r"outside allowed range \[-9223372036854775807ns, 9223372036854775807ns\]"
+)
+
class TestTimestampArithmetic:
def test_overflow_offset(self):
@@ -39,11 +43,6 @@ def test_overflow_offset_raises(self):
stamp = Timestamp("2017-01-13 00:00:00")
offset_overflow = 20169940 * offsets.Day(1)
- msg = (
- "the add operation between "
- r"\<-?\d+ \* Days\> and \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} "
- "will overflow"
- )
lmsg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
@@ -51,7 +50,7 @@ def test_overflow_offset_raises(self):
with pytest.raises(OverflowError, match=lmsg):
stamp + offset_overflow
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
offset_overflow + stamp
with pytest.raises(OverflowError, match=lmsg):
@@ -61,12 +60,12 @@ def test_overflow_offset_raises(self):
# used to crash, so check for proper overflow exception
stamp = Timestamp("2000/1/1")
- offset_overflow = to_offset("D") * 100**5
+ offset_overflow = offsets.Day() * 100**5
with pytest.raises(OverflowError, match=lmsg):
stamp + offset_overflow
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=TD_OVERFLOW_MSG):
offset_overflow + stamp
with pytest.raises(OverflowError, match=lmsg):
@@ -78,7 +77,7 @@ def test_overflow_timestamp_raises(self):
a = Timestamp("2101-01-01 00:00:00")
b = Timestamp("1688-01-01 00:00:00")
- with pytest.raises(OutOfBoundsDatetime, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
a - b
# but we're OK for timestamp and datetime.datetime
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 0bd93a78227ff..e1307c6543a51 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1848,13 +1848,8 @@ def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
- msg = "|".join(
- [
- "Python int too large to convert to C long",
- "long too big to convert",
- "int too big to convert",
- ]
- )
+ # a fixture exists in tests/scalar; should it be moved to a higher level?
+ msg = R"outside allowed range \[-9223372036854775807ns, 9223372036854775807ns\]"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000)
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 6c11ec42858c0..fcb2575f2251d 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -109,9 +109,7 @@ def test_to_timedelta_invalid_unit(self, arg):
def test_to_timedelta_time(self):
# time not supported ATM
- msg = (
- "Value must be Timedelta, string, integer, float, timedelta or convertible"
- )
+ msg = "Invalid type for timedelta scalar"
with pytest.raises(ValueError, match=msg):
to_timedelta(time(second=1))
assert to_timedelta(time(second=1), errors="coerce") is pd.NaT
@@ -264,10 +262,7 @@ def test_to_timedelta_zerodim(self, fixed_now_ts):
dt64 = fixed_now_ts.to_datetime64()
arg = np.array(dt64)
- msg = (
- "Value must be Timedelta, string, integer, float, timedelta "
- "or convertible, not datetime64"
- )
+ msg = "Invalid type for timedelta scalar"
with pytest.raises(ValueError, match=msg):
to_timedelta(arg)
diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py
index d9e86d53f2587..d45d79b5e1896 100644
--- a/pandas/tests/tslibs/test_timedeltas.py
+++ b/pandas/tests/tslibs/test_timedeltas.py
@@ -1,5 +1,3 @@
-import re
-
import numpy as np
import pytest
@@ -65,14 +63,8 @@ def test_huge_nanoseconds_overflow():
"kwargs", [{"Seconds": 1}, {"seconds": 1, "Nanoseconds": 1}, {"Foo": 2}]
)
def test_kwarg_assertion(kwargs):
- err_message = (
- "cannot construct a Timedelta from the passed arguments, "
- "allowed keywords are "
- "[weeks, days, hours, minutes, seconds, "
- "milliseconds, microseconds, nanoseconds]"
- )
-
- with pytest.raises(ValueError, match=re.escape(err_message)):
+ msg = "cannot construct a Timedelta from the passed arguments"
+ with pytest.raises(ValueError, match=msg):
Timedelta(**kwargs)
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Per https://github.com/pandas-dev/pandas/pull/46936#issuecomment-1122981776, this PR extracts the functional updates in #46936, to simplify review. The main motivations are those explained in https://github.com/pandas-dev/pandas/pull/46936#discussion_r869761730:
> - DRY up `Timedelta` creation logic, which was divided and duplicated over several functions
> - Do so outside `Timdelta.__new__`, since (AFAICT) there shouldn't yet be a public API for creating instances with non-nano resolution
> - Add overflow checks, and consistently raise `OutOfBoundsTimedelta` when it's detected.
As a bonus, this appears to speed things up considerably, especially in the case of passing `int`s (by avoiding the unnecessary `int -> td64[ns] -> int` conversion):
```
before after ratio
[a853022e] [d4444404]
<main> <td-construction>
- 8.41±0.8μs 7.50±0.2μs 0.89 tslibs.timedelta.TimedeltaConstructor.time_from_unit
- 7.99±0.3μs 4.42±0.1μs 0.55 tslibs.timedelta.TimedeltaConstructor.time_from_string
- 2.80±0.03μs 758±40ns 0.27 tslibs.timedelta.TimedeltaConstructor.time_from_missing
- 6.88±0.3μs 1.67±0.2μs 0.24 tslibs.timedelta.TimedeltaConstructor.time_from_int
```
I've left out all the test suite updates from #46936, save for those changes associated with `s/OverflowError/OutOfBoundsTimedelta`.
/cc @jbrockmendel @jreback | https://api.github.com/repos/pandas-dev/pandas/pulls/47004 | 2022-05-11T20:22:31Z | 2022-08-15T16:31:40Z | null | 2022-08-15T16:31:41Z |
Bug fix dataframe interval transpose | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 51ca9dbd763b4..a2699996ddfc8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3451,7 +3451,14 @@ def transpose(self, *args, copy: bool = False) -> DataFrame:
arr_type = dtype.construct_array_type()
values = self.values
- new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
+ new_values = []
+ for row in values:
+ try:
+ new_values.append(arr_type._from_sequence(row, dtype=dtype))
+ except Exception:
+ new_values.append(row)
+
+ new_values = values
result = type(self)._from_arrays(
new_values, index=self.columns, columns=self.index
)
| - [ ] closes #44917
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47001 | 2022-05-11T17:15:13Z | 2022-05-12T15:23:42Z | null | 2022-05-31T16:12:47Z |
ENH: do not sort resulting columns when sort=False | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 128fd68674f96..5111ffbda14f9 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -745,6 +745,7 @@ Reshaping
- Bug in concanenation with ``IntegerDtype``, or ``FloatingDtype`` arrays where the resulting dtype did not mirror the behavior of the non-nullable dtypes (:issue:`46379`)
- Bug in :func:`concat` with identical key leads to error when indexing :class:`MultiIndex` (:issue:`46519`)
- Bug in :meth:`DataFrame.join` with a list when using suffixes to join DataFrames with duplicate column names (:issue:`46396`)
+- Bug in :meth:`DataFrame.pivot_table` with ``sort=False`` results in sorted index (:issue:`17041`)
-
Sparse
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index b428155e722ff..8c861c199169b 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -216,7 +216,7 @@ def __internal_pivot_table(
)
table = table.reindex(m, axis=1)
- if isinstance(table, ABCDataFrame):
+ if sort is True and isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 31f720b9ec336..3471c83638126 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -2167,6 +2167,28 @@ def test_pivot_table_sort_false(self):
)
tm.assert_frame_equal(result, expected)
+ def test_pivot_table_sort_false_with_multiple_values(self):
+ df = DataFrame(
+ {
+ "firstname": ["John", "Michael"],
+ "lastname": ["Foo", "Bar"],
+ "height": [173, 182],
+ "age": [47, 33],
+ }
+ )
+ result = df.pivot_table(
+ index=["lastname", "firstname"], values=["height", "age"], sort=False
+ )
+ expected = DataFrame(
+ [[173, 47], [182, 33]],
+ columns=["height", "age"],
+ index=MultiIndex.from_tuples(
+ [("Foo", "John"), ("Bar", "Michael")],
+ names=["lastname", "firstname"],
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_pivot_table_with_margins_and_numeric_columns(self):
# GH 26568
df = DataFrame([["a", "x", 1], ["a", "y", 2], ["b", "y", 3], ["b", "z", 4]])
| - [x] closes #17041
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Currently, `pivot_table` sorts the resulting columns even when `sort=False`. For example, the following code snippet produces the column ["age", "height"] in alphabetical order instead of the order in the original `df` or passed by `values=`.
```py
import pandas as pd
df = pd.DataFrame(
{
"firstname": ["John", "Michael"],
"lastname": ["Foo", "Bar"],
"height": [173, 182],
"age": [47, 33],
}
)
result = df.pivot_table(index=["lastname", "firstname"], values=["height", "age"], sort=False)
print(result)
```
```
age height
lastname firstname
Foo John 47 173
Bar Michael 33 182
```
This PR fixes this issue by not sorting the resulting columns when `sort=False`
FYI, #17041 has some discussion about the order of the columns. PR #40954 added the keyword `sort` to `pivot_table` and was merged in v1.3. | https://api.github.com/repos/pandas-dev/pandas/pulls/46994 | 2022-05-11T06:10:41Z | 2022-05-19T14:36:21Z | 2022-05-19T14:36:20Z | 2022-05-19T17:34:26Z |
CLN: DatetimeTZBlock don't override values_for_json | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 3836f3e6540b4..421fac4ea767b 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1989,11 +1989,9 @@ class DatetimeTZBlock(DatetimeLikeBlock):
_validate_ndim = True
_can_consolidate = False
- def values_for_json(self) -> np.ndarray:
- # force dt64tz to go through object dtype
- # tz info will be lost when converting to
- # dt64 which is naive
- return self.values.astype(object)
+ # Don't use values_for_json from DatetimeLikeBlock since it is
+ # an invalid optimization here(drop the tz)
+ values_for_json = NDArrayBackedExtensionBlock.values_for_json
class ObjectBlock(NumpyBlock):
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46993 | 2022-05-11T01:27:21Z | 2022-06-06T19:25:29Z | 2022-06-06T19:25:29Z | 2022-06-06T19:25:36Z |
Backport PR #46991 on branch 1.4.x (CI/TST: Fix test for pyarrow 8.0 release) | diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index 00b205d018e89..eef2bb6639c36 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -14,6 +14,7 @@
pa_version_under5p0 = _palv < Version("5.0.0")
pa_version_under6p0 = _palv < Version("6.0.0")
pa_version_under7p0 = _palv < Version("7.0.0")
+ pa_version_under8p0 = _palv < Version("8.0.0")
except ImportError:
pa_version_under1p01 = True
pa_version_under2p0 = True
@@ -22,3 +23,4 @@
pa_version_under5p0 = True
pa_version_under6p0 = True
pa_version_under7p0 = True
+ pa_version_under8p0 = True
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index d2a3a2eebef02..3df59a2eeef1f 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -17,6 +17,7 @@
pa_version_under2p0,
pa_version_under5p0,
pa_version_under6p0,
+ pa_version_under8p0,
)
import pandas.util._test_decorators as td
@@ -717,11 +718,14 @@ def test_duplicate_columns(self, pa):
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, pa, ValueError, "Duplicate column names found")
- def test_unsupported(self, pa):
- # timedelta
+ def test_timedelta(self, pa):
df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
- self.check_external_error_on_write(df, pa, NotImplementedError)
+ if pa_version_under8p0:
+ self.check_external_error_on_write(df, pa, NotImplementedError)
+ else:
+ check_round_trip(df, pa)
+ def test_unsupported(self, pa):
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
| Backport PR #46991: CI/TST: Fix test for pyarrow 8.0 release | https://api.github.com/repos/pandas-dev/pandas/pulls/46992 | 2022-05-11T01:11:08Z | 2022-05-11T12:07:07Z | 2022-05-11T12:07:07Z | 2022-05-11T12:07:08Z |
CI/TST: Fix test for pyarrow 8.0 release | diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index 00b205d018e89..eef2bb6639c36 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -14,6 +14,7 @@
pa_version_under5p0 = _palv < Version("5.0.0")
pa_version_under6p0 = _palv < Version("6.0.0")
pa_version_under7p0 = _palv < Version("7.0.0")
+ pa_version_under8p0 = _palv < Version("8.0.0")
except ImportError:
pa_version_under1p01 = True
pa_version_under2p0 = True
@@ -22,3 +23,4 @@
pa_version_under5p0 = True
pa_version_under6p0 = True
pa_version_under7p0 = True
+ pa_version_under8p0 = True
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 7c04a51e803f6..5b899079dfffd 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -18,6 +18,7 @@
pa_version_under2p0,
pa_version_under5p0,
pa_version_under6p0,
+ pa_version_under8p0,
)
import pandas.util._test_decorators as td
@@ -718,11 +719,14 @@ def test_duplicate_columns(self, pa):
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, pa, ValueError, "Duplicate column names found")
- def test_unsupported(self, pa):
- # timedelta
+ def test_timedelta(self, pa):
df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
- self.check_external_error_on_write(df, pa, NotImplementedError)
+ if pa_version_under8p0:
+ self.check_external_error_on_write(df, pa, NotImplementedError)
+ else:
+ check_round_trip(df, pa)
+ def test_unsupported(self, pa):
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
| - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/46991 | 2022-05-10T22:30:11Z | 2022-05-11T01:10:27Z | 2022-05-11T01:10:27Z | 2022-05-11T01:16:22Z |
ENH: non-nano Timestamp.timestamp, to_period | diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index 8cc7bcb2a1aad..833ba4ce70bd7 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -6,6 +6,7 @@ from pandas._libs.tslibs.np_datetime cimport NPY_DATETIMEUNIT
cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit)
cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil
cdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1
+cdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
cdef dict attrname_to_abbrevs
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 0c4d4c5c235b5..3be21ba754f27 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -348,6 +348,19 @@ cdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns) e
return day_units
+cdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1:
+ if reso == NPY_DATETIMEUNIT.NPY_FR_ns:
+ return 1_000_000_000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_us:
+ return 1_000_000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_ms:
+ return 1_000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
+ return 1
+ else:
+ raise NotImplementedError(reso)
+
+
cdef dict _reso_str_map = {
Resolution.RESO_NS.value: "nanosecond",
Resolution.RESO_US.value: "microsecond",
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 923d1f830e1a9..fcc9390a2cccd 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -52,7 +52,11 @@ from pandas._libs.tslibs.conversion cimport (
convert_datetime_to_tsobject,
convert_to_tsobject,
)
-from pandas._libs.tslibs.dtypes cimport npy_unit_to_abbrev
+from pandas._libs.tslibs.dtypes cimport (
+ npy_unit_to_abbrev,
+ periods_per_day,
+ periods_per_second,
+)
from pandas._libs.tslibs.util cimport (
is_array,
is_datetime64_object,
@@ -811,11 +815,12 @@ cdef class _Timestamp(ABCTimestamp):
cdef:
local_val = self._maybe_convert_value_to_local()
int64_t normalized
+ int64_t ppd = periods_per_day(self._reso)
if self._reso != NPY_FR_ns:
raise NotImplementedError(self._reso)
- normalized = normalize_i8_stamp(local_val)
+ normalized = normalize_i8_stamp(local_val, ppd)
return Timestamp(normalized).tz_localize(self.tzinfo)
# -----------------------------------------------------------------
@@ -834,8 +839,8 @@ cdef class _Timestamp(ABCTimestamp):
if len(state) == 3:
# pre-non-nano pickle
+ # TODO: no tests get here 2022-05-10
reso = NPY_FR_ns
- assert False # checking for coverage
else:
reso = state[4]
self._reso = reso
@@ -982,10 +987,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
# GH 17329
# Note: Naive timestamps will not match datetime.stdlib
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
- return round(self.value / 1e9, 6)
+ denom = periods_per_second(self._reso)
+
+ return round(self.value / denom, 6)
cpdef datetime to_pydatetime(_Timestamp self, bint warn=True):
"""
@@ -1080,9 +1085,6 @@ cdef class _Timestamp(ABCTimestamp):
"""
from pandas import Period
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
-
if self.tz is not None:
# GH#21333
warnings.warn(
@@ -2252,16 +2254,18 @@ Timestamp.resolution = Timedelta(nanoseconds=1) # GH#21336, GH#21365
@cython.cdivision(False)
-cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil:
+cdef inline int64_t normalize_i8_stamp(int64_t local_val, int64_t ppd) nogil:
"""
Round the localized nanosecond timestamp down to the previous midnight.
Parameters
----------
local_val : int64_t
+ ppd : int64_t
+ Periods per day in the Timestamp's resolution.
Returns
-------
int64_t
"""
- return local_val - (local_val % ccalendar.DAY_NANOS)
+ return local_val - (local_val % ppd)
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index c892816629462..108d58bcc251d 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -840,3 +840,11 @@ def test_to_datetime64(self, dt64, ts):
res = ts.to_datetime64()
assert res == dt64
assert res.dtype == dt64.dtype
+
+ def test_timestamp(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.timestamp() == alt.timestamp()
+
+ def test_to_period(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.to_period("D") == alt.to_period("D")
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46990 | 2022-05-10T21:55:56Z | 2022-05-11T01:58:59Z | 2022-05-11T01:58:59Z | 2022-05-11T15:10:30Z |
ENH: Implement nlargest and nsmallest for DataFrameGroupBy | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 2efc6c9167a83..042452c79230e 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -150,6 +150,7 @@ Other enhancements
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
- Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`)
+- Implemented :meth:`nlargest` and :meth:`nsmallest` methods for :class:`DataFrameGroupBy` (:issue:`46924`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.notable_bug_fixes:
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index ec9a2e4a4b5c0..8eb6ad061cc25 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -33,6 +33,8 @@ class OutputKey:
"corr",
"cov",
"diff",
+ "nlargest",
+ "nsmallest",
]
)
| plotting_methods
@@ -40,9 +42,7 @@ class OutputKey:
series_apply_allowlist: frozenset[str] = (
common_apply_allowlist
- | frozenset(
- {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
- )
+ | frozenset({"is_monotonic_increasing", "is_monotonic_decreasing"})
) | frozenset(["dtype", "unique"])
dataframe_apply_allowlist: frozenset[str] = common_apply_allowlist | frozenset(
@@ -155,6 +155,8 @@ def maybe_normalize_deprecated_kernels(kernel):
"transform",
"sample",
"value_counts",
+ "nlargest",
+ "nsmallest",
]
)
# Valid values of `name` for `groupby.transform(name)`
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 245e33fb1a23b..9aa840def1294 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1812,6 +1812,24 @@ def value_counts(
result = result_frame
return result.__finalize__(self.obj, method="value_counts")
+ @doc(DataFrame.nlargest)
+ def nlargest(self, n, columns, keep: str = "first"):
+ f = partial(DataFrame.nlargest, n=n, columns=columns, keep=keep)
+ data = self._obj_with_exclusions
+ # Don't change behavior if result index happens to be the same, i.e.
+ # already ordered and n >= all group sizes.
+ result = self._python_apply_general(f, data, not_indexed_same=True)
+ return result
+
+ @doc(DataFrame.nsmallest)
+ def nsmallest(self, n, columns, keep: str = "first"):
+ f = partial(DataFrame.nsmallest, n=n, columns=columns, keep=keep)
+ data = self._obj_with_exclusions
+ # Don't change behavior if result index happens to be the same, i.e.
+ # already ordered and n >= all group sizes.
+ result = self._python_apply_general(f, data, not_indexed_same=True)
+ return result
+
def _wrap_transform_general_frame(
obj: DataFrame, group: DataFrame, res: DataFrame | Series
diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py
index 7c64d82608c9e..cce7f85afb9e1 100644
--- a/pandas/tests/groupby/test_allowlist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -51,6 +51,8 @@
"corr",
"cov",
"diff",
+ "nlargest",
+ "nsmallest",
]
@@ -322,6 +324,8 @@ def test_tab_completion(mframe):
"sample",
"ewm",
"value_counts",
+ "nlargest",
+ "nsmallest",
}
assert results == expected
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 016e817e43402..f92761e93f9d8 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2721,3 +2721,70 @@ def test_by_column_values_with_same_starting_value():
).set_index("Name")
tm.assert_frame_equal(result, expected_result)
+
+
+@pytest.mark.parametrize(
+ "function, keep, indices, name, data",
+ [
+ (
+ "nlargest",
+ "first",
+ [("bar", 1), ("bar", 2), ("foo", 5), ("foo", 3)],
+ ["b2", "b3", "f3", "f1"],
+ [3, 3, 3, 1],
+ ),
+ (
+ "nlargest",
+ "last",
+ [("bar", 2), ("bar", 1), ("foo", 5), ("foo", 4)],
+ ["b3", "b2", "f3", "f2"],
+ [3, 3, 3, 1],
+ ),
+ (
+ "nlargest",
+ "all",
+ [("bar", 1), ("bar", 2), ("foo", 5), ("foo", 3), ("foo", 4)],
+ ["b2", "b3", "f3", "f1", "f2"],
+ [3, 3, 3, 1, 1],
+ ),
+ (
+ "nsmallest",
+ "first",
+ [("bar", 0), ("bar", 1), ("foo", 3), ("foo", 4)],
+ ["b1", "b2", "f1", "f2"],
+ [1, 3, 1, 1],
+ ),
+ (
+ "nsmallest",
+ "last",
+ [("bar", 0), ("bar", 2), ("foo", 4), ("foo", 3)],
+ ["b1", "b3", "f2", "f1"],
+ [1, 3, 1, 1],
+ ),
+ (
+ "nsmallest",
+ "all",
+ [("bar", 0), ("bar", 1), ("bar", 2), ("foo", 3), ("foo", 4)],
+ ["b1", "b2", "b3", "f1", "f2"],
+ [1, 3, 3, 1, 1],
+ ),
+ ],
+)
+def test_nlargest_nsmallest(function, keep, indices, name, data):
+ # test nlargest and nsmallest for DataFrameGroupBy
+ # GH46924
+ df = DataFrame(
+ {
+ "group": ["bar", "bar", "bar", "foo", "foo", "foo"],
+ "name": ["b1", "b2", "b3", "f1", "f2", "f3"],
+ "data": [1, 3, 3, 1, 1, 3],
+ }
+ )
+ grouped = df.groupby("group")
+ func = getattr(grouped, function)
+ result = func(n=2, keep=keep, columns="data")
+
+ expected_index = MultiIndex.from_tuples(indices, names=["group", None])
+ expected = DataFrame({"name": name, "data": data}, index=expected_index)
+
+ tm.assert_frame_equal(result, expected)
| - [ ] closes #46924
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
Try to implement `nlargest` and `nsmallest` methods for `DataFrameGroupBy`. Appreciate any comments.
```python
>>> df
group name data
0 bar b1 1
1 bar b2 3
2 bar b3 3
3 foo f1 1
4 foo f2 1
5 foo f3 3
>>> df.groupby("group").nlargest(n=2)
name data
group
bar 1 b2 3
2 b3 3
foo 5 f3 3
3 f1 1
>>> df.groupby("group").nlargest(n=2, keep="all")
name data
group
bar 1 b2 3
2 b3 3
foo 5 f3 3
3 f1 1
4 f2 1
>>> df.groupby("group").nsmallest(n=2)
name data
group
bar 0 b1 1
1 b2 3
foo 3 f1 1
4 f2 1
>>> df.groupby("group").nsmallest(n=2, keep="all")
name data
group
bar 0 b1 1
1 b2 3
2 b3 3
foo 3 f1 1
4 f2 1
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/46986 | 2022-05-10T15:34:59Z | 2022-06-23T22:20:27Z | null | 2022-06-23T22:20:28Z |
TST: Assign back multiple column to datetime | diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index c61f3c028f129..539b56667ee07 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -21,11 +21,13 @@
Interval,
NaT,
Series,
+ Timestamp,
array,
concat,
date_range,
interval_range,
isna,
+ to_datetime,
)
import pandas._testing as tm
from pandas.api.types import is_scalar
@@ -1196,6 +1198,23 @@ def test_iloc_getitem_int_single_ea_block_view(self):
arr[2] = arr[-1]
assert ser[0] == arr[-1]
+ def test_iloc_setitem_multicolumn_to_datetime(self, using_array_manager):
+
+ # GH#20511
+ df = DataFrame({"A": ["2022-01-01", "2022-01-02"], "B": ["2021", "2022"]})
+
+ df.iloc[:, [0]] = DataFrame({"A": to_datetime(["2021", "2022"])})
+ expected = DataFrame(
+ {
+ "A": [
+ Timestamp("2021-01-01 00:00:00"),
+ Timestamp("2022-01-01 00:00:00"),
+ ],
+ "B": ["2021", "2022"],
+ }
+ )
+ tm.assert_frame_equal(df, expected, check_dtype=using_array_manager)
+
class TestILocErrors:
# NB: this test should work for _any_ Series we can pass as
| This tests make sure when converting multiple columns to datetimes
and when assiging back it remains as datetime not as unix date
as mentioned in GH #20511.
- [x] closes #20511
- [x] [Tests added and passed]
- [x] All [code checks passed]
| https://api.github.com/repos/pandas-dev/pandas/pulls/46982 | 2022-05-10T06:24:57Z | 2022-06-05T23:44:41Z | 2022-06-05T23:44:41Z | 2022-06-05T23:44:46Z |
CI: Move MacOS build from Azure to GHA | diff --git a/.github/workflows/windows.yml b/.github/workflows/macos-windows.yml
similarity index 70%
rename from .github/workflows/windows.yml
rename to .github/workflows/macos-windows.yml
index 6f267357554a3..560a421ec74ec 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/macos-windows.yml
@@ -1,4 +1,4 @@
-name: Windows
+name: Windows-MacOS
on:
push:
@@ -21,18 +21,20 @@ env:
jobs:
pytest:
- runs-on: windows-latest
defaults:
run:
shell: bash -el {0}
timeout-minutes: 90
strategy:
matrix:
+ os: [macos-latest, windows-latest]
env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
fail-fast: false
+ runs-on: ${{ matrix.os }}
+ name: ${{ format('{0} {1}', matrix.os, matrix.env_file) }}
concurrency:
# https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.os }}
cancel-in-progress: true
steps:
@@ -47,10 +49,17 @@ jobs:
mamba-version: "*"
channels: conda-forge
activate-environment: pandas-dev
- channel-priority: strict
+ channel-priority: ${{ matrix.os == 'macos-latest' && 'flexible' || 'strict' }}
environment-file: ci/deps/${{ matrix.env_file }}
use-only-tar-bz2: true
+ # ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib
+ # Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib
+ # Reason: image not found
+ - name: Upgrade pyarrow on MacOS
+ run: conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6
+ if: ${{ matrix.os == 'macos-latest' }}
+
- name: Build Pandas
uses: ./.github/actions/build_pandas
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index b2ae620019962..37df662df8edc 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -22,11 +22,6 @@ variables:
PANDAS_CI: 1
jobs:
-- template: ci/azure/posix.yml
- parameters:
- name: macOS
- vmImage: macOS-10.15
-
- job: py38_32bit
pool:
vmImage: ubuntu-18.04
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
deleted file mode 100644
index df1d5049be33d..0000000000000
--- a/ci/azure/posix.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-parameters:
- name: ''
- vmImage: ''
-
-jobs:
-- job: ${{ parameters.name }}
- timeoutInMinutes: 90
- pool:
- vmImage: ${{ parameters.vmImage }}
- strategy:
- matrix:
- py38:
- ENV_FILE: ci/deps/actions-38.yaml
- CONDA_PY: "38"
-
- py39:
- ENV_FILE: ci/deps/actions-39.yaml
- CONDA_PY: "39"
-
- py310:
- ENV_FILE: ci/deps/actions-310.yaml
- CONDA_PY: "310"
-
- steps:
- - script: echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
- displayName: 'Set conda path'
-
- - script: rm /usr/local/miniconda/pkgs/cache/*.json
- displayName: 'Workaround for mamba-org/mamba#488'
-
- - script: ci/setup_env.sh
- displayName: 'Setup environment and build pandas'
-
- - script: |
- conda run -n pandas-dev --no-capture-output ci/run_tests.sh
- displayName: 'Test'
-
- - script: |
- pushd /tmp
- conda run -n pandas-dev python -c "import pandas; pandas.show_versions()"
- popd
- displayName: 'Build versions'
-
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- failTaskOnFailedTests: true
- testResultsFiles: 'test-data.xml'
- testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
- displayName: 'Publish test results'
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index a85767eb6f1b4..483353cfcb3cd 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -73,15 +73,6 @@ mamba install -n pandas-dev 'setuptools<60'
echo "conda list -n pandas-dev"
conda list -n pandas-dev
-# From pyarrow on MacOS
-# ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib
-# Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib
-# Reason: image not found
-if [[ "$(uname)" == 'Darwin' ]]; then
- echo "Update pyarrow for pyarrow on MacOS"
- conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6
-fi
-
if [[ "$BITS32" == "yes" ]]; then
# activate 32-bit compiler
export CONDA_BUILD=1
| - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Similar to https://github.com/pandas-dev/pandas/pull/46960
| https://api.github.com/repos/pandas-dev/pandas/pulls/46981 | 2022-05-10T02:38:18Z | 2022-05-12T15:54:27Z | 2022-05-12T15:54:27Z | 2022-05-13T16:00:47Z |
TST: avoid chained assignment in tests outside of specific tests on chaining | diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index daddca7891b93..47ebca0b9bf5c 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -66,7 +66,7 @@ def test_combine_first(self, float_frame):
assert (combined["A"][:10] == 1).all()
# reverse overlap
- tail["A"][:10] = 0
+ tail.iloc[:10, tail.columns.get_loc("A")] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 2f0a4195d2f74..ee9af3f436943 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -27,8 +27,8 @@ def test_cov(self, float_frame, float_string_frame):
# with NAs
frame = float_frame.copy()
- frame["A"][:5] = np.nan
- frame["B"][5:10] = np.nan
+ frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
+ frame.iloc[5:10, frame.columns.get_loc("B")] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc["A", "B"] = np.nan
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 7c33242192d2e..3e22734992d23 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1333,7 +1333,8 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
- frame_copy["C"][:5] = np.nan
+ # adding NAs to first 5 values of column "C"
+ frame_copy.loc[: frame_copy.index[4], "C"] = np.nan
added = float_frame + frame_copy
diff --git a/pandas/tests/groupby/test_apply_mutate.py b/pandas/tests/groupby/test_apply_mutate.py
index 36e117cf03353..d1f25aabe31a2 100644
--- a/pandas/tests/groupby/test_apply_mutate.py
+++ b/pandas/tests/groupby/test_apply_mutate.py
@@ -72,7 +72,7 @@ def test_apply_function_with_indexing():
)
def fn(x):
- x.col2[x.index[-1]] = 0
+ x.loc[x.index[-1], "col2"] = 0
return x.col2
result = df.groupby(["col1"], as_index=False).apply(fn)
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index 9d5e65e692fdc..a57b363c0a448 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -128,7 +128,7 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
df = ymd.copy()
exp = ymd.copy()
df.loc[2000, 4] = 0
- exp.loc[2000, 4].values[:] = 0
+ exp.iloc[65:85] = 0
tm.assert_frame_equal(df, exp)
df["A"].loc[2000, 4] = 1
@@ -136,7 +136,7 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
tm.assert_frame_equal(df, exp)
df.loc[2000] = 5
- exp.loc[2000].values[:] = 5
+ exp.iloc[:100] = 5
tm.assert_frame_equal(df, exp)
# this works...for now
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 3f8e4401808b7..1cd96bff4177d 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -53,7 +53,7 @@
def test_not_change_nan_loc(series, new_series, expected_ser):
# GH 28403
df = DataFrame({"A": series})
- df["A"].loc[:] = new_series
+ df.loc[:, "A"] = new_series
expected = DataFrame({"A": expected_ser})
tm.assert_frame_equal(df.isna(), expected)
tm.assert_frame_equal(df.notna(), ~expected)
| A small part of the test changes from #46958 that can be done separately.
We have specific tests about chained indexing (eg `test_chaining_and_caching.py`), so outside those specific tests, I can think we can avoid using chained indexing (regardless of #46958, this would follow our own recommendation on best indexing practices, although it also shows that some cases of mixed positional/label based setting is somewhat convoluted) | https://api.github.com/repos/pandas-dev/pandas/pulls/46980 | 2022-05-09T23:03:27Z | 2022-05-10T00:07:13Z | 2022-05-10T00:07:13Z | 2022-05-10T08:16:05Z |
API/TST: add tests for new copy/view behaviour | diff --git a/pandas/tests/copy_view/__init__.py b/pandas/tests/copy_view/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
new file mode 100644
index 0000000000000..16cd72cc1cb06
--- /dev/null
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -0,0 +1,649 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+import pandas.core.common as com
+from pandas.tests.copy_view.util import get_array
+
+# -----------------------------------------------------------------------------
+# Indexing operations taking subset + modifying the subset/parent
+
+
+def test_subset_column_selection(using_copy_on_write):
+ # Case: taking a subset of the columns of a DataFrame
+ # + afterwards modifying the subset
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ subset = df[["a", "c"]]
+
+ if using_copy_on_write:
+ # the subset shares memory ...
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ # ... but uses CoW when being modified
+ subset.iloc[0, 0] = 0
+ else:
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ # INFO this no longer raise warning since pandas 1.4
+ # with pd.option_context("chained_assignment", "warn"):
+ # with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset.iloc[0, 0] = 0
+
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+ expected = DataFrame({"a": [0, 2, 3], "c": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(subset, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_column_selection_modify_parent(using_copy_on_write):
+ # Case: taking a subset of the columns of a DataFrame
+ # + afterwards modifying the parent
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+
+ subset = df[["a", "c"]]
+ if using_copy_on_write:
+ # the subset shares memory ...
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ # ... but parent uses CoW parent when it is modified
+ df.iloc[0, 0] = 0
+
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ # different column/block still shares memory
+ assert np.shares_memory(get_array(subset, "c"), get_array(df, "c"))
+
+ expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(subset, expected)
+
+
+def test_subset_row_slice(using_copy_on_write):
+ # Case: taking a subset of the rows of a DataFrame using a slice
+ # + afterwards modifying the subset
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ subset = df[1:3]
+ subset._mgr._verify_integrity()
+
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+ if using_copy_on_write:
+ subset.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+ else:
+ # INFO this no longer raise warning since pandas 1.4
+ # with pd.option_context("chained_assignment", "warn"):
+ # with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset.iloc[0, 0] = 0
+
+ subset._mgr._verify_integrity()
+
+ expected = DataFrame({"a": [0, 3], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3))
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.iloc[1, 0] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+def test_subset_column_slice(using_copy_on_write, using_array_manager, dtype):
+ # Case: taking a subset of the columns of a DataFrame using a slice
+ # + afterwards modifying the subset
+ single_block = (dtype == "int64") and not using_array_manager
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+
+ subset = df.iloc[:, 1:]
+ subset._mgr._verify_integrity()
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
+
+ subset.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
+
+ else:
+ # we only get a warning in case of a single block
+ warn = com.SettingWithCopyWarning if single_block else None
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(warn):
+ subset.iloc[0, 0] = 0
+
+ expected = DataFrame({"b": [0, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)})
+ tm.assert_frame_equal(subset, expected)
+ # original parent dataframe is not modified (also not for BlockManager case,
+ # except for single block)
+ if not using_copy_on_write and (using_array_manager or single_block):
+ df_orig.iloc[0, 1] = 0
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+@pytest.mark.parametrize(
+ "row_indexer",
+ [slice(1, 2), np.array([False, True, True]), np.array([1, 2])],
+ ids=["slice", "mask", "array"],
+)
+@pytest.mark.parametrize(
+ "column_indexer",
+ [slice("b", "c"), np.array([False, True, True]), ["b", "c"]],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_loc_rows_columns(
+ dtype, row_indexer, column_indexer, using_array_manager
+):
+ # Case: taking a subset of the rows+columns of a DataFrame using .loc
+ # + afterwards modifying the subset
+ # Generic test for several combinations of row/column indexers, not all
+ # of those could actually return a view / need CoW (so this test is not
+ # checking memory sharing, only ensuring subsequent mutation doesn't
+ # affect the parent dataframe)
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+
+ subset = df.loc[row_indexer, column_indexer]
+
+ # modifying the subset never modifies the parent
+ subset.iloc[0, 0] = 0
+
+ expected = DataFrame(
+ {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
+ )
+ tm.assert_frame_equal(subset, expected)
+ # a few corner cases _do_ actually modify the parent (with both row and column
+ # slice, and in case of ArrayManager or BlockManager with single block)
+ if (
+ isinstance(row_indexer, slice)
+ and isinstance(column_indexer, slice)
+ and (using_array_manager or dtype == "int64")
+ ):
+ df_orig.iloc[1, 1] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+@pytest.mark.parametrize(
+ "row_indexer",
+ [slice(1, 3), np.array([False, True, True]), np.array([1, 2])],
+ ids=["slice", "mask", "array"],
+)
+@pytest.mark.parametrize(
+ "column_indexer",
+ [slice(1, 3), np.array([False, True, True]), [1, 2]],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_iloc_rows_columns(
+ dtype, row_indexer, column_indexer, using_array_manager
+):
+ # Case: taking a subset of the rows+columns of a DataFrame using .iloc
+ # + afterwards modifying the subset
+ # Generic test for several combinations of row/column indexers, not all
+ # of those could actually return a view / need CoW (so this test is not
+ # checking memory sharing, only ensuring subsequent mutation doesn't
+ # affect the parent dataframe)
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+
+ subset = df.iloc[row_indexer, column_indexer]
+
+ # modifying the subset never modifies the parent
+ subset.iloc[0, 0] = 0
+
+ expected = DataFrame(
+ {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
+ )
+ tm.assert_frame_equal(subset, expected)
+ # a few corner cases _do_ actually modify the parent (with both row and column
+ # slice, and in case of ArrayManager or BlockManager with single block)
+ if (
+ isinstance(row_indexer, slice)
+ and isinstance(column_indexer, slice)
+ and (using_array_manager or dtype == "int64")
+ ):
+ df_orig.iloc[1, 1] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_set_with_row_indexer(indexer_si, indexer, using_copy_on_write):
+ # Case: setting values with a row indexer on a viewing subset
+ # subset[indexer] = value and subset.iloc[indexer] = value
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
+ df_orig = df.copy()
+ subset = df[1:4]
+
+ if (
+ indexer_si is tm.setitem
+ and isinstance(indexer, np.ndarray)
+ and indexer.dtype == "int"
+ ):
+ pytest.skip("setitem with labels selects on columns")
+
+ if using_copy_on_write:
+ indexer_si(subset)[indexer] = 0
+ else:
+ # INFO iloc no longer raises warning since pandas 1.4
+ warn = com.SettingWithCopyWarning if indexer_si is tm.setitem else None
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(warn):
+ indexer_si(subset)[indexer] = 0
+
+ expected = DataFrame(
+ {"a": [0, 0, 4], "b": [0, 0, 7], "c": [0.0, 0.0, 0.4]}, index=range(1, 4)
+ )
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig[1:3] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_set_with_mask(using_copy_on_write):
+ # Case: setting values with a mask on a viewing subset: subset[mask] = value
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
+ df_orig = df.copy()
+ subset = df[1:4]
+
+ mask = subset > 3
+
+ if using_copy_on_write:
+ subset[mask] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset[mask] = 0
+
+ expected = DataFrame(
+ {"a": [2, 3, 0], "b": [0, 0, 0], "c": [0.20, 0.3, 0.4]}, index=range(1, 4)
+ )
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.loc[3, "a"] = 0
+ df_orig.loc[1:3, "b"] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_set_column(using_copy_on_write):
+ # Case: setting a single column on a viewing subset -> subset[col] = value
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset["a"] = np.array([10, 11], dtype="int64")
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset["a"] = np.array([10, 11], dtype="int64")
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame(
+ {"a": [10, 11], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3)
+ )
+ tm.assert_frame_equal(subset, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+def test_subset_set_column_with_loc(using_copy_on_write, using_array_manager, dtype):
+ # Case: setting a single column with loc on a viewing subset
+ # -> subset.loc[:, col] = value
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ # The (i)loc[:, col] inplace deprecation gets triggered here, ignore those
+ # warnings and only assert the SettingWithCopyWarning
+ raise_on_extra_warnings = False if using_array_manager else True
+ with tm.assert_produces_warning(
+ com.SettingWithCopyWarning,
+ raise_on_extra_warnings=raise_on_extra_warnings,
+ ):
+ subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame(
+ {"a": [10, 11], "b": [5, 6], "c": np.array([8, 9], dtype=dtype)},
+ index=range(1, 3),
+ )
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write or using_array_manager:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.loc[1:3, "a"] = np.array([10, 11], dtype="int64")
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_set_column_with_loc2(using_copy_on_write, using_array_manager):
+ # Case: setting a single column with loc on a viewing subset
+ # -> subset.loc[:, col] = value
+ # separate test for case of DataFrame of a single column -> takes a separate
+ # code path
+ df = DataFrame({"a": [1, 2, 3]})
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset.loc[:, "a"] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ # The (i)loc[:, col] inplace deprecation gets triggered here, ignore those
+ # warnings and only assert the SettingWithCopyWarning
+ raise_on_extra_warnings = False if using_array_manager else True
+ with tm.assert_produces_warning(
+ com.SettingWithCopyWarning,
+ raise_on_extra_warnings=raise_on_extra_warnings,
+ ):
+ subset.loc[:, "a"] = 0
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame({"a": [0, 0]}, index=range(1, 3))
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write or using_array_manager:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.loc[1:3, "a"] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+def test_subset_set_columns(using_copy_on_write, dtype):
+ # Case: setting multiple columns on a viewing subset
+ # -> subset[[col1, col2]] = value
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset[["a", "c"]] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset[["a", "c"]] = 0
+
+ subset._mgr._verify_integrity()
+ if using_copy_on_write:
+ # first and third column should certainly have no references anymore
+ assert all(subset._mgr._has_no_reference(i) for i in [0, 2])
+ expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3))
+ tm.assert_frame_equal(subset, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [slice("a", "b"), np.array([True, True, False]), ["a", "b"]],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_set_with_column_indexer(
+ indexer, using_copy_on_write, using_array_manager
+):
+ # Case: setting multiple columns with a column indexer on a viewing subset
+ # -> subset.loc[:, [col1, col2]] = value
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset.loc[:, indexer] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ # The (i)loc[:, col] inplace deprecation gets triggered here, ignore those
+ # warnings and only assert the SettingWithCopyWarning
+ with tm.assert_produces_warning(
+ com.SettingWithCopyWarning, raise_on_extra_warnings=False
+ ):
+ subset.loc[:, indexer] = 0
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame({"a": [0, 0], "b": [0.0, 0.0], "c": [5, 6]}, index=range(1, 3))
+ # TODO full row slice .loc[:, idx] update inplace instead of overwrite?
+ expected["b"] = expected["b"].astype("int64")
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write or using_array_manager:
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # In the mixed case with BlockManager, only one of the two columns is
+ # mutated in the parent frame ..
+ df_orig.loc[1:2, ["a"]] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+# TODO add more tests modifying the parent
+
+
+# -----------------------------------------------------------------------------
+# Series -- Indexing operations taking subset + modifying the subset/parent
+
+
+def test_series_getitem_slice(using_copy_on_write):
+ # Case: taking a slice of a Series + afterwards modifying the subset
+ s = Series([1, 2, 3], index=["a", "b", "c"])
+ s_orig = s.copy()
+
+ subset = s[:]
+ assert np.shares_memory(subset.values, s.values)
+
+ subset.iloc[0] = 0
+
+ if using_copy_on_write:
+ assert not np.shares_memory(subset.values, s.values)
+
+ expected = Series([0, 2, 3], index=["a", "b", "c"])
+ tm.assert_series_equal(subset, expected)
+
+ if using_copy_on_write:
+ # original parent series is not modified (CoW)
+ tm.assert_series_equal(s, s_orig)
+ else:
+ # original parent series is actually updated
+ assert s.iloc[0] == 0
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
+ ids=["slice", "mask", "array"],
+)
+def test_series_subset_set_with_indexer(indexer_si, indexer, using_copy_on_write):
+ # Case: setting values in a viewing Series with an indexer
+ s = Series([1, 2, 3], index=["a", "b", "c"])
+ s_orig = s.copy()
+ subset = s[:]
+
+ indexer_si(subset)[indexer] = 0
+ expected = Series([0, 0, 3], index=["a", "b", "c"])
+ tm.assert_series_equal(subset, expected)
+
+ if using_copy_on_write:
+ tm.assert_series_equal(s, s_orig)
+ else:
+ tm.assert_series_equal(s, expected)
+
+
+# -----------------------------------------------------------------------------
+# del operator
+
+
+def test_del_frame(using_copy_on_write):
+ # Case: deleting a column with `del` on a viewing child dataframe should
+ # not modify parent + update the references
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df[:]
+
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ del df2["b"]
+
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+ tm.assert_frame_equal(df, df_orig)
+ tm.assert_frame_equal(df2, df_orig[["a", "c"]])
+ df2._mgr._verify_integrity()
+
+ # TODO in theory modifying column "b" of the parent wouldn't need a CoW
+ # but the weakref is still alive and so we still perform CoW
+
+ df2.loc[0, "a"] = 100
+ if using_copy_on_write:
+ # modifying child after deleting a column still doesn't update parent
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ assert df.loc[0, "a"] == 100
+
+
+def test_del_series():
+ s = Series([1, 2, 3], index=["a", "b", "c"])
+ s_orig = s.copy()
+ s2 = s[:]
+
+ assert np.shares_memory(s.values, s2.values)
+
+ del s2["a"]
+
+ assert not np.shares_memory(s.values, s2.values)
+ tm.assert_series_equal(s, s_orig)
+ tm.assert_series_equal(s2, s_orig[["b", "c"]])
+
+ # modifying s2 doesn't need copy on write (due to `del`, s2 is backed by new array)
+ values = s2.values
+ s2.loc["b"] = 100
+ assert values[0] == 100
+
+
+# -----------------------------------------------------------------------------
+# Accessing column as Series
+
+
+def test_column_as_series(using_copy_on_write, using_array_manager):
+ # Case: selecting a single column now also uses Copy-on-Write
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ s = df["a"]
+
+ assert np.shares_memory(s.values, get_array(df, "a"))
+
+ if using_copy_on_write or using_array_manager:
+ s[0] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ s[0] = 0
+
+ expected = Series([0, 2, 3], name="a")
+ tm.assert_series_equal(s, expected)
+ if using_copy_on_write:
+ # assert not np.shares_memory(s.values, get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+ # ensure cached series on getitem is not the changed series
+ tm.assert_series_equal(df["a"], df_orig["a"])
+ else:
+ df_orig.iloc[0, 0] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_column_as_series_set_with_upcast(using_copy_on_write, using_array_manager):
+ # Case: selecting a single column now also uses Copy-on-Write -> when
+ # setting a value causes an upcast, we don't need to update the parent
+ # DataFrame through the cache mechanism
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ s = df["a"]
+ if using_copy_on_write or using_array_manager:
+ s[0] = "foo"
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ s[0] = "foo"
+
+ expected = Series(["foo", 2, 3], dtype=object, name="a")
+ tm.assert_series_equal(s, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_orig)
+ # ensure cached series on getitem is not the changed series
+ tm.assert_series_equal(df["a"], df_orig["a"])
+ else:
+ df_orig["a"] = expected
+ tm.assert_frame_equal(df, df_orig)
+
+
+# TODO add tests for other indexing methods on the Series
+
+
+def test_dataframe_add_column_from_series():
+ # Case: adding a new column to a DataFrame from an existing column/series
+ # -> always already takes a copy on assignment
+ # (no change in behaviour here)
+ # TODO can we achieve the same behaviour with Copy-on-Write?
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+
+ s = Series([10, 11, 12])
+ df["new"] = s
+ assert not np.shares_memory(get_array(df, "new"), s.values)
+
+ # editing series -> doesn't modify column in frame
+ s[0] = 0
+ expected = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "new": [10, 11, 12]})
+ tm.assert_frame_equal(df, expected)
+
+ # editing column in frame -> doesn't modify series
+ df.loc[2, "new"] = 100
+ expected_s = Series([0, 11, 12])
+ tm.assert_series_equal(s, expected_s)
+
+
+# TODO add tests for constructors
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
new file mode 100644
index 0000000000000..1ed458e95b78e
--- /dev/null
+++ b/pandas/tests/copy_view/test_methods.py
@@ -0,0 +1,128 @@
+import numpy as np
+
+from pandas import DataFrame
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+def test_copy(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_copy = df.copy()
+
+ # the deep copy doesn't share memory
+ assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert df_copy._mgr.refs is None
+
+ # mutating copy doesn't mutate original
+ df_copy.iloc[0, 0] = 0
+ assert df.iloc[0, 0] == 1
+
+
+def test_copy_shallow(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_copy = df.copy(deep=False)
+
+ # the shallow copy still shares memory
+ assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert df_copy._mgr.refs is not None
+
+ if using_copy_on_write:
+ # mutating shallow copy doesn't mutate original
+ df_copy.iloc[0, 0] = 0
+ assert df.iloc[0, 0] == 1
+ # mutating triggered a copy-on-write -> no longer shares memory
+ assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+ # but still shares memory for the other columns/blocks
+ assert np.shares_memory(get_array(df_copy, "c"), get_array(df, "c"))
+ else:
+ # mutating shallow copy does mutate original
+ df_copy.iloc[0, 0] = 0
+ assert df.iloc[0, 0] == 0
+ # and still shares memory
+ assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+
+
+# -----------------------------------------------------------------------------
+# DataFrame methods returning new DataFrame using shallow copy
+
+
+def test_reset_index(using_copy_on_write):
+ # Case: resetting the index (i.e. adding a new column) + mutating the
+ # resulting dataframe
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=[10, 11, 12]
+ )
+ df_orig = df.copy()
+ df2 = df.reset_index()
+ df2._mgr._verify_integrity()
+
+ if using_copy_on_write:
+ # still shares memory (df2 is a shallow copy)
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ # mutating df2 triggers a copy-on-write for that column / block
+ df2.iloc[0, 2] = 0
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_rename_columns(using_copy_on_write):
+ # Case: renaming columns returns a new dataframe
+ # + afterwards modifying the result
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df.rename(columns=str.upper)
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ df2.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
+ expected = DataFrame({"A": [0, 2, 3], "B": [4, 5, 6], "C": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(df2, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_rename_columns_modify_parent(using_copy_on_write):
+ # Case: renaming columns returns a new dataframe
+ # + afterwards modifying the original (parent) dataframe
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df2 = df.rename(columns=str.upper)
+ df2_orig = df2.copy()
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ df.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
+ expected = DataFrame({"a": [0, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(df, expected)
+ tm.assert_frame_equal(df2, df2_orig)
+
+
+def test_reindex_columns(using_copy_on_write):
+ # Case: reindexing the column returns a new dataframe
+ # + afterwards modifying the result
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df.reindex(columns=["a", "c"])
+
+ if using_copy_on_write:
+ # still shares memory (df2 is a shallow copy)
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ # mutating df2 triggers a copy-on-write for that column
+ df2.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ tm.assert_frame_equal(df, df_orig)
diff --git a/pandas/tests/copy_view/util.py b/pandas/tests/copy_view/util.py
new file mode 100644
index 0000000000000..9e358c7eec749
--- /dev/null
+++ b/pandas/tests/copy_view/util.py
@@ -0,0 +1,11 @@
+def get_array(df, col):
+ """
+ Helper method to get array for a DataFrame column.
+
+ Equivalent of df[col].values, but without going through normal getitem,
+ which triggers tracking references / CoW (and we might be testing that
+ this is done by some other operation).
+ """
+ icol = df.columns.get_loc(col)
+ assert isinstance(icol, int)
+ return df._get_column_array(icol)
| This is broken off from https://github.com/pandas-dev/pandas/pull/46958 / https://github.com/pandas-dev/pandas/pull/41878
This are the _new_ tests that I wrote for the PRs implementing the proposed new copy/view semantics with Copy-on-Write (https://github.com/pandas-dev/pandas/issues/36195). They are intended to be readable tests to follow and review the intended semantics.
They will partly be duplicating some other tests that also deal with copy/view details spread around in the test suite (see the required test edits in the abovementioned PRs), but I would say this is fine, as I think it is good to have a set of centralized tests focused on those copy/view semantics.
I broke of this part from the main PR because I think this part can be done as a pre-cursor (reducing the diff size for the main PR), and because it might be useful to review those tests as a way to review the proposed behaviours (cc @pandas-dev/pandas-core).
Some practical questions about the test organization:
- Does the general style of the tests look OK?
- I currently put those tests in `/tests/indexing/test_copy_on_write.py`, but 1) it's not only about indexing, and 2) "copy_on_write" is more the internal implementation mechanism, while the user facing thing we are testing is copy/view behaviour. So maybe I could put them in a new top-level `tests/copy_view/` test directory? (which also allows more easily to split it in multiple files) | https://api.github.com/repos/pandas-dev/pandas/pulls/46979 | 2022-05-09T22:50:06Z | 2022-05-31T17:01:57Z | 2022-05-31T17:01:57Z | 2022-05-31T22:07:19Z |
follow-up to 44787, use pandas compat for platform specifics in added test | diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 35749aabdc39f..98e136a9c4ba6 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -11,6 +11,8 @@
import pytest
+from pandas.compat import is_platform_windows
+
import pandas as pd
import pandas._testing as tm
@@ -325,5 +327,10 @@ def test_tar_gz_to_different_filename():
members = archive.getmembers()
assert len(members) == 1
content = archive.extractfile(members[0]).read().decode("utf8")
- content = content.replace("\r\n", "\n") # windows
- assert content == "foo,bar\n1,2\n"
+
+ if is_platform_windows():
+ expected = "foo,bar\r\n1,2\r\n"
+ else:
+ expected = "foo,bar\n1,2\n"
+
+ assert content == expected
| follow-up to #44787, as requested by @mroeschke in https://github.com/pandas-dev/pandas/pull/44787#issuecomment-1120292009.
Updates one added test to use `is_platform_windows()` for dealing with carriage returns. | https://api.github.com/repos/pandas-dev/pandas/pulls/46973 | 2022-05-09T07:30:29Z | 2022-05-09T18:48:15Z | 2022-05-09T18:48:15Z | 2022-05-09T18:48:26Z |
WIP: ENH Add int[pyarrow] dtype | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 3645e8744d8af..1423e65bf52b7 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -47,6 +47,14 @@
from pandas.core.api import (
# dtype
+ Int8ArrowDtype,
+ Int16ArrowDtype,
+ Int32ArrowDtype,
+ Int64ArrowDtype,
+ UInt8ArrowDtype,
+ UInt16ArrowDtype,
+ UInt32ArrowDtype,
+ UInt64ArrowDtype,
Int8Dtype,
Int16Dtype,
Int32Dtype,
diff --git a/pandas/core/api.py b/pandas/core/api.py
index cf082d2013d3b..373003e2b42a1 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -27,6 +27,16 @@
value_counts,
)
from pandas.core.arrays import Categorical
+from pandas.core.arrays.arrow.integer import (
+ Int8ArrowDtype,
+ Int16ArrowDtype,
+ Int32ArrowDtype,
+ Int64ArrowDtype,
+ UInt8ArrowDtype,
+ UInt16ArrowDtype,
+ UInt32ArrowDtype,
+ UInt64ArrowDtype,
+)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import (
Float32Dtype,
diff --git a/pandas/core/arrays/arrow/integer.py b/pandas/core/arrays/arrow/integer.py
new file mode 100644
index 0000000000000..e7de9c03ec711
--- /dev/null
+++ b/pandas/core/arrays/arrow/integer.py
@@ -0,0 +1,150 @@
+from __future__ import annotations
+
+import pyarrow as pa
+
+from pandas.core.dtypes.base import register_extension_dtype
+
+from pandas.core.arrays.arrow.numeric import (
+ NumericArrowArray,
+ NumericArrowDtype,
+)
+
+
+class IntegerArrowDtype(NumericArrowDtype):
+ """
+ An ExtensionDtype to hold a single size & kind of integer Arrow dtype.
+
+ These specific implementations are subclasses of the non-public
+ IntegerArrowDtype. For example we have Int8ArrowDtype to represent signed int 8s.
+
+ The attributes name & type are set when these subclasses are created.
+ """
+
+ _default_pa_dtype = pa.int64()
+ _dtype_checker = pa.types.is_integer
+
+ @classmethod
+ def construct_array_type(cls) -> type[IntegerArrowArray]:
+ """
+ Return the array type associated with this dtype.
+
+ Returns
+ -------
+ type
+ """
+ return IntegerArrowArray
+
+ @classmethod
+ def _str_to_dtype_mapping(cls):
+ return INT_STR_TO_DTYPE
+
+
+class IntegerArrowArray(NumericArrowArray):
+ """
+ Array of pyarrow integer values.
+
+ To construct an IntegerArray from generic array-like ipaut, use
+ :func:`pandas.array` with one of the integer dtypes (see examples).
+
+ Parameters
+ ----------
+ values : pa.ChunkedArray
+ A 1-d integer-dtype array.
+
+ Attributes
+ ----------
+ None
+
+ Methods
+ -------
+ None
+
+ Returns
+ -------
+ IntegerArrowArray
+ """
+
+ _dtype_cls = IntegerArrowDtype
+
+
+_dtype_docstring = """
+An ExtensionDtype for {dtype} integer pyarrow data.
+
+Attributes
+----------
+None
+
+Methods
+-------
+None
+"""
+
+# create the Dtype
+
+
+@register_extension_dtype
+class Int8ArrowDtype(IntegerArrowDtype):
+ type = pa.int8()
+ name = "int8"
+ __doc__ = _dtype_docstring.format(dtype="int8")
+
+
+@register_extension_dtype
+class Int16ArrowDtype(IntegerArrowDtype):
+ type = pa.int16()
+ name = "int16"
+ __doc__ = _dtype_docstring.format(dtype="int16")
+
+
+@register_extension_dtype
+class Int32ArrowDtype(IntegerArrowDtype):
+ type = pa.int32()
+ name = "int32"
+ __doc__ = _dtype_docstring.format(dtype="int32")
+
+
+@register_extension_dtype
+class Int64ArrowDtype(IntegerArrowDtype):
+ type = pa.int64()
+ name = "int64"
+ __doc__ = _dtype_docstring.format(dtype="int64")
+
+
+@register_extension_dtype
+class UInt8ArrowDtype(IntegerArrowDtype):
+ type = pa.uint8()
+ name = "uint8"
+ __doc__ = _dtype_docstring.format(dtype="uint8")
+
+
+@register_extension_dtype
+class UInt16ArrowDtype(IntegerArrowDtype):
+ type = pa.uint16()
+ name = "uint16"
+ __doc__ = _dtype_docstring.format(dtype="uint16")
+
+
+@register_extension_dtype
+class UInt32ArrowDtype(IntegerArrowDtype):
+ type = pa.uint32()
+ name = "uint32"
+ __doc__ = _dtype_docstring.format(dtype="uint32")
+
+
+@register_extension_dtype
+class UInt64ArrowDtype(IntegerArrowDtype):
+ type = pa.uint64()
+ name = "uint64"
+ __doc__ = _dtype_docstring.format(dtype="uint64")
+
+
+INT_STR_TO_DTYPE: dict[str, IntegerArrowDtype] = {
+ "int8": Int8ArrowDtype(),
+ "int16": Int16ArrowDtype(),
+ "int32": Int32ArrowDtype(),
+ "int64": Int64ArrowDtype(),
+ "uint8": UInt8ArrowDtype(),
+ "uint16": UInt16ArrowDtype(),
+ "uint32": UInt32ArrowDtype(),
+ "uint64": UInt64ArrowDtype(),
+}
diff --git a/pandas/core/arrays/arrow/numeric.py b/pandas/core/arrays/arrow/numeric.py
new file mode 100644
index 0000000000000..3b55d34f6e67b
--- /dev/null
+++ b/pandas/core/arrays/arrow/numeric.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+from typing import (
+ Any,
+ Callable,
+ TypeVar,
+)
+
+import pyarrow as pa
+
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import cache_readonly
+
+from pandas.core.arrays.arrow.array import ArrowExtensionArray
+from pandas.core.arrays.arrow.dtype import ArrowDtype
+
+T = TypeVar("T", bound="NumericArrowArray")
+
+
+class NumericArrowDtype(ArrowDtype):
+ _default_pa_dtype: pa.null()
+ _dtype_checker: Callable[[Any], bool] # pa.types.is_<type>
+
+ @property
+ def _is_numeric(self) -> bool:
+ return True
+
+ @cache_readonly
+ def is_signed_integer(self) -> bool:
+ return self.kind == "i"
+
+ @cache_readonly
+ def is_unsigned_integer(self) -> bool:
+ return self.kind == "u"
+
+ @classmethod
+ def _str_to_dtype_mapping(cls):
+ raise AbstractMethodError(cls)
+
+
+class NumericArrowArray(ArrowExtensionArray):
+ """
+ Base class for Integer and Floating and Boolean dtypes.
+ """
+
+ _dtype_cls: type[NumericArrowDtype]
+
+ def __init__(self, values: pa.ChunkedArray) -> None:
+ checker = self._dtype_cls._dtype_checker
+ if not (isinstance(values, pa.ChunkedArray) and checker(values.type)):
+ descr = (
+ "floating"
+ if self._dtype_cls.kind == "f" # type: ignore[comparison-overlap]
+ else "integer"
+ )
+ raise TypeError(f"values should be {descr} arrow array.")
+ super().__init__(values)
+
+ @cache_readonly
+ def dtype(self) -> NumericArrowDtype:
+ mapping = self._dtype_cls._str_to_dtype_mapping()
+ return mapping[str(self._data.type)]
+
+ @classmethod
+ def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
+ if dtype is None:
+ dtype = cls._dtype_cls._default_pa_dtype
+ return cls(pa.chunked_array([scalars], type=dtype.type))
+
+ @classmethod
+ def _from_sequence_of_strings(cls, strings, *, dtype=None, copy: bool = False):
+ from pandas.core.tools.numeric import to_numeric
+
+ scalars = to_numeric(strings, errors="raise")
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
| - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46972 | 2022-05-09T03:06:18Z | 2022-05-16T18:54:08Z | null | 2022-05-16T18:54:12Z |
Backport PR #46960 on branch 1.4.x (CI: Move Windows build from Azure to GHA) | diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
index e916d5bfde5fb..5e5a3bdf0f024 100644
--- a/.github/actions/build_pandas/action.yml
+++ b/.github/actions/build_pandas/action.yml
@@ -12,6 +12,9 @@ runs:
- name: Build Pandas
run: |
- python setup.py build_ext -j 2
+ python setup.py build_ext -j $N_JOBS
python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
shell: bash -el {0}
+ env:
+ # Cannot use parallel compilation on Windows, see https://github.com/pandas-dev/pandas/issues/30873
+ N_JOBS: ${{ runner.os == 'Windows' && 1 || 2 }}
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index f5cbb0e88ff11..b86dcea59edb8 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -180,7 +180,6 @@ jobs:
run: ci/run_tests.sh
# TODO: Don't continue on error for PyPy
continue-on-error: ${{ env.IS_PYPY == 'true' }}
- if: always()
- name: Build Version
run: conda list
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
new file mode 100644
index 0000000000000..6f267357554a3
--- /dev/null
+++ b/.github/workflows/windows.yml
@@ -0,0 +1,75 @@
+name: Windows
+
+on:
+ push:
+ branches:
+ - main
+ - 1.4.x
+ pull_request:
+ branches:
+ - main
+ - 1.4.x
+ paths-ignore:
+ - "doc/**"
+
+env:
+ PANDAS_CI: 1
+ PYTEST_TARGET: pandas
+ PYTEST_WORKERS: auto
+ PATTERN: "not slow and not db and not network and not single_cpu"
+
+
+jobs:
+ pytest:
+ runs-on: windows-latest
+ defaults:
+ run:
+ shell: bash -el {0}
+ timeout-minutes: 90
+ strategy:
+ matrix:
+ env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
+ fail-fast: false
+ concurrency:
+ # https://github.community/t/concurrecy-not-work-for-push/183068/7
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows
+ cancel-in-progress: true
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Install Dependencies
+ uses: conda-incubator/setup-miniconda@v2.1.1
+ with:
+ mamba-version: "*"
+ channels: conda-forge
+ activate-environment: pandas-dev
+ channel-priority: strict
+ environment-file: ci/deps/${{ matrix.env_file }}
+ use-only-tar-bz2: true
+
+ - name: Build Pandas
+ uses: ./.github/actions/build_pandas
+
+ - name: Test
+ run: ci/run_tests.sh
+
+ - name: Build Version
+ run: conda list
+
+ - name: Publish test results
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test results
+ path: test-data.xml
+ if: failure()
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v2
+ with:
+ flags: unittests
+ name: codecov-pandas
+ fail_ci_if_error: false
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index d84f2d7784935..0b2a9f5b2b0cd 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -27,11 +27,6 @@ jobs:
name: macOS
vmImage: macOS-10.15
-- template: ci/azure/windows.yml
- parameters:
- name: Windows
- vmImage: windows-2019
-
- job: py38_32bit
pool:
vmImage: ubuntu-18.04
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
deleted file mode 100644
index 02c6564579aa2..0000000000000
--- a/ci/azure/windows.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-parameters:
- name: ''
- vmImage: ''
-
-jobs:
-- job: ${{ parameters.name }}
- timeoutInMinutes: 90
- pool:
- vmImage: ${{ parameters.vmImage }}
- strategy:
- matrix:
- py38:
- ENV_FILE: ci/deps/actions-38.yaml
- CONDA_PY: "38"
-
- py39:
- ENV_FILE: ci/deps/actions-39.yaml
- CONDA_PY: "39"
-
- py310:
- ENV_FILE: ci/deps/actions-310.yaml
- CONDA_PY: "310"
-
- steps:
- - powershell: |
- Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
- Write-Host "##vso[task.prependpath]$HOME/miniconda3/bin"
- displayName: 'Add conda to PATH'
- - bash: conda install -yv -c conda-forge -n base 'mamba>=0.21.2'
- displayName: 'Install mamba'
-
- - bash: |
- # See https://github.com/mamba-org/mamba/issues/1370
- # See https://github.com/mamba-org/mamba/issues/633
- C:\\Miniconda\\condabin\\mamba.bat create -n pandas-dev
- C:\\Miniconda\\condabin\\mamba.bat env update -n pandas-dev --file ci\\deps\\actions-$(CONDA_PY).yaml
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- C:\\Miniconda\\condabin\\mamba.bat install -n pandas-dev 'setuptools<60'
- C:\\Miniconda\\condabin\\mamba.bat list -n pandas-dev
- displayName: 'Create anaconda environment'
- - bash: |
- source activate pandas-dev
- conda list
- python setup.py build_ext -q -j 2
- python -m pip install --no-build-isolation -e .
- displayName: 'Build'
- - bash: |
- source activate pandas-dev
- wmic.exe cpu get caption, deviceid, name, numberofcores, maxclockspeed
- ci/run_tests.sh
- displayName: 'Test'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- failTaskOnFailedTests: true
- testResultsFiles: 'test-data.xml'
- testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
- displayName: 'Publish test results'
| Backport PR #46960: CI: Move Windows build from Azure to GHA | https://api.github.com/repos/pandas-dev/pandas/pulls/46971 | 2022-05-08T22:19:42Z | 2022-05-10T00:05:30Z | 2022-05-10T00:05:30Z | 2022-05-10T00:05:30Z |
TYP: enable reportOverlappingOverload | diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index a27ed42c984bf..d4a2bedcfba1a 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -74,7 +74,7 @@ jobs:
- name: Install pyright
# note: keep version in sync with .pre-commit-config.yaml
- run: npm install -g pyright@1.1.230
+ run: npm install -g pyright@1.1.245
- name: Build Pandas
id: build
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bd095c03e6fdb..fac09fcf70511 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -89,7 +89,7 @@ repos:
types: [python]
stages: [manual]
# note: keep version in sync with .github/workflows/code-checks.yml
- additional_dependencies: ['pyright@1.1.230']
+ additional_dependencies: ['pyright@1.1.245']
- repo: local
hooks:
- id: flake8-rst
diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi
index bad6013aa58b6..9b727b6278792 100644
--- a/pandas/_libs/interval.pyi
+++ b/pandas/_libs/interval.pyi
@@ -68,10 +68,12 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
left: _OrderableT,
right: _OrderableT,
closed: IntervalClosedType = ...,
- ): ...
+ ) -> None: ...
def __hash__(self) -> int: ...
@overload
- def __contains__(self: Interval[_OrderableTimesT], _OrderableTimesT) -> bool: ...
+ def __contains__(
+ self: Interval[_OrderableTimesT], key: _OrderableTimesT
+ ) -> bool: ...
@overload
def __contains__(
self: Interval[_OrderableScalarT], key: Union[int, float]
@@ -83,9 +85,9 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __add__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __add__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __add__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __add__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
@@ -93,9 +95,9 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __radd__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __radd__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __radd__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __radd__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
@@ -103,9 +105,9 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __sub__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __sub__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __sub__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __sub__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
@@ -113,33 +115,33 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __rsub__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __rsub__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __rsub__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __rsub__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __mul__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __mul__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __mul__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __mul__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __rmul__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __rmul__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __rmul__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __rmul__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __truediv__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __truediv__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __truediv__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __truediv__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __floordiv__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __floordiv__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __floordiv__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __floordiv__(
self: Interval[float], y: Union[int, float]
@@ -157,7 +159,7 @@ class IntervalTree(IntervalMixin):
right: np.ndarray,
closed: IntervalClosedType = ...,
leaf_size: int = ...,
- ): ...
+ ) -> None: ...
@property
def mid(self) -> np.ndarray: ...
@property
diff --git a/pyproject.toml b/pyproject.toml
index 7b32c5f8eab49..030e6bc3c470c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -156,6 +156,7 @@ exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
# enable subset of "strict"
reportDuplicateImport = true
reportInvalidStubStatement = true
+reportOverlappingOverload = true
reportPropertyTypeMismatch = true
reportUntypedClassDecorator = true
reportUntypedFunctionDecorator = true
| reportOverlappingOverload needs a newer pyright version (otherwise it has a few false positives). I intended to update pyright together with mypy in #46905 first, but mypy still hasn't been updated on conda-forge. | https://api.github.com/repos/pandas-dev/pandas/pulls/46969 | 2022-05-08T14:33:00Z | 2022-05-09T00:06:39Z | 2022-05-09T00:06:39Z | 2022-05-26T01:59:34Z |
CLN: mmap used by only read_csv | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 15a8f2e114041..fdee1600c2a32 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -640,7 +640,7 @@ def get_handle(
.. versionchanged:: 1.4.0 Zstandard support.
memory_map : bool, default False
- See parsers._parser_params for more information.
+ See parsers._parser_params for more information. Only used by read_csv.
is_text : bool, default True
Whether the type of the content passed to the file/buffer is string or
bytes. This is not the same as `"b" not in mode`. If a string content is
@@ -659,6 +659,8 @@ def get_handle(
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
encoding = encoding or "utf-8"
+ errors = errors or "strict"
+
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
mode += "b"
@@ -681,6 +683,7 @@ def get_handle(
handles: list[BaseBuffer]
# memory mapping needs to be the first step
+ # only used for read_csv
handle, memory_map, handles = _maybe_memory_map(
handle,
memory_map,
@@ -1064,7 +1067,7 @@ def closed(self):
return self.fp is None
-class _MMapWrapper(abc.Iterator):
+class _CSVMMapWrapper(abc.Iterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
@@ -1079,7 +1082,7 @@ class _MMapWrapper(abc.Iterator):
def __init__(
self,
- f: IO,
+ f: ReadBuffer[bytes],
encoding: str = "utf-8",
errors: str = "strict",
decode: bool = True,
@@ -1089,11 +1092,13 @@ def __init__(
self.decoder = codecs.getincrementaldecoder(encoding)(errors=errors)
self.decode = decode
+ # needed for compression libraries and TextIOWrapper
self.attributes = {}
for attribute in ("seekable", "readable"):
if not hasattr(f, attribute):
continue
self.attributes[attribute] = getattr(f, attribute)()
+
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
@@ -1101,7 +1106,7 @@ def __getattr__(self, name: str):
return lambda: self.attributes[name]
return getattr(self.mmap, name)
- def __iter__(self) -> _MMapWrapper:
+ def __iter__(self) -> _CSVMMapWrapper:
return self
def read(self, size: int = -1) -> str | bytes:
@@ -1196,7 +1201,7 @@ def _maybe_memory_map(
memory_map: bool,
encoding: str,
mode: str,
- errors: str | None,
+ errors: str,
decode: bool,
) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
"""Try to memory map file/buffer."""
@@ -1207,25 +1212,22 @@ def _maybe_memory_map(
# need to open the file first
if isinstance(handle, str):
- if encoding and "b" not in mode:
- # Encoding
- handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
- else:
- # Binary mode
- handle = open(handle, mode)
+ handle = open(handle, "rb")
handles.append(handle)
# error: Argument 1 to "_MMapWrapper" has incompatible type "Union[IO[Any],
# RawIOBase, BufferedIOBase, TextIOBase, mmap]"; expected "IO[Any]"
try:
+ # open mmap, adds *-able, and convert to string
wrapped = cast(
BaseBuffer,
- _MMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type]
+ _CSVMMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type]
)
finally:
for handle in reversed(handles):
# error: "BaseBuffer" has no attribute "close"
handle.close() # type: ignore[attr-defined]
+ handles = []
handles.append(wrapped)
return wrapped, memory_map, handles
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index ca6809470b2b1..22399917f2bf7 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -413,18 +413,18 @@ def test_constructor_bad_file(self, mmap_file):
err = mmap.error
with pytest.raises(err, match=msg):
- icom._MMapWrapper(non_file)
+ icom._CSVMMapWrapper(non_file)
with open(mmap_file) as target:
pass
msg = "I/O operation on closed file"
with pytest.raises(ValueError, match=msg):
- icom._MMapWrapper(target)
+ icom._CSVMMapWrapper(target)
def test_get_attr(self, mmap_file):
with open(mmap_file) as target:
- wrapper = icom._MMapWrapper(target)
+ wrapper = icom._CSVMMapWrapper(target)
attrs = dir(wrapper.mmap)
attrs = [attr for attr in attrs if not attr.startswith("__")]
@@ -437,7 +437,7 @@ def test_get_attr(self, mmap_file):
def test_next(self, mmap_file):
with open(mmap_file) as target:
- wrapper = icom._MMapWrapper(target)
+ wrapper = icom._CSVMMapWrapper(target)
lines = target.readlines()
for line in lines:
| Make it clear that mmap is only ever used by `read_csv`. | https://api.github.com/repos/pandas-dev/pandas/pulls/46967 | 2022-05-07T22:10:42Z | 2022-05-09T00:04:45Z | 2022-05-09T00:04:45Z | 2022-05-26T01:59:27Z |
TYP: overload asarray_tuplesafe signature | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 098b501cc95c9..2e8d6dbced4e3 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -225,14 +225,27 @@ def count_not_none(*args) -> int:
return sum(x is not None for x in args)
-def asarray_tuplesafe(values, dtype: NpDtype | None = None) -> np.ndarray:
+@overload
+def asarray_tuplesafe(
+ values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
+) -> np.ndarray:
+ # ExtensionArray can only be returned when values is an Index, all other iterables
+ # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
+ # signature, so instead we special-case some common types.
+ ...
+
+
+@overload
+def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
+ ...
+
+
+def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
values = list(values)
elif isinstance(values, ABCIndex):
- # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]",
- # expected "ndarray")
- return values._values # type: ignore[return-value]
+ return values._values
if isinstance(values, list) and dtype in [np.object_, object]:
return construct_1d_object_array_from_listlike(values)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 59e55bdcb405a..8ebaaa28e13a5 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -555,10 +555,7 @@ def __new__(
subarr = com.asarray_tuplesafe(data, dtype=_dtype_obj)
if dtype is None:
# with e.g. a list [1, 2, 3] casting to numeric is _not_ deprecated
- # error: Incompatible types in assignment (expression has type
- # "Union[ExtensionArray, ndarray[Any, Any]]", variable has type
- # "ndarray[Any, Any]")
- subarr = _maybe_cast_data_without_dtype( # type: ignore[assignment]
+ subarr = _maybe_cast_data_without_dtype(
subarr, cast_numeric_deprecated=False
)
dtype = subarr.dtype
| xref https://github.com/pandas-dev/pandas/issues/37715 | https://api.github.com/repos/pandas-dev/pandas/pulls/46966 | 2022-05-07T20:08:48Z | 2022-05-09T00:05:36Z | 2022-05-09T00:05:36Z | 2022-05-09T00:05:36Z |
Fix link to be *true* raw data | diff --git a/doc/source/getting_started/intro_tutorials/includes/titanic.rst b/doc/source/getting_started/intro_tutorials/includes/titanic.rst
index 312ca48b45dd1..19b8e81914e31 100644
--- a/doc/source/getting_started/intro_tutorials/includes/titanic.rst
+++ b/doc/source/getting_started/intro_tutorials/includes/titanic.rst
@@ -26,6 +26,6 @@ consists of the following data columns:
.. raw:: html
</p>
- <a href="https://github.com/pandas-dev/pandas/tree/main/doc/data/titanic.csv" class="btn btn-dark btn-sm">To raw data</a>
+ <a href="https://github.com/pandas-dev/pandas/raw/main/doc/data/titanic.csv" class="btn btn-dark btn-sm">To raw data</a>
</div>
</div>
| Which would be handy for `wget`
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46963 | 2022-05-07T08:05:05Z | 2022-05-08T22:18:14Z | 2022-05-08T22:18:14Z | 2022-05-08T22:18:19Z |
CI: Move Windows build from Azure to GHA | diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
index e916d5bfde5fb..5e5a3bdf0f024 100644
--- a/.github/actions/build_pandas/action.yml
+++ b/.github/actions/build_pandas/action.yml
@@ -12,6 +12,9 @@ runs:
- name: Build Pandas
run: |
- python setup.py build_ext -j 2
+ python setup.py build_ext -j $N_JOBS
python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
shell: bash -el {0}
+ env:
+ # Cannot use parallel compilation on Windows, see https://github.com/pandas-dev/pandas/issues/30873
+ N_JOBS: ${{ runner.os == 'Windows' && 1 || 2 }}
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index f5cbb0e88ff11..b86dcea59edb8 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -180,7 +180,6 @@ jobs:
run: ci/run_tests.sh
# TODO: Don't continue on error for PyPy
continue-on-error: ${{ env.IS_PYPY == 'true' }}
- if: always()
- name: Build Version
run: conda list
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
new file mode 100644
index 0000000000000..6f267357554a3
--- /dev/null
+++ b/.github/workflows/windows.yml
@@ -0,0 +1,75 @@
+name: Windows
+
+on:
+ push:
+ branches:
+ - main
+ - 1.4.x
+ pull_request:
+ branches:
+ - main
+ - 1.4.x
+ paths-ignore:
+ - "doc/**"
+
+env:
+ PANDAS_CI: 1
+ PYTEST_TARGET: pandas
+ PYTEST_WORKERS: auto
+ PATTERN: "not slow and not db and not network and not single_cpu"
+
+
+jobs:
+ pytest:
+ runs-on: windows-latest
+ defaults:
+ run:
+ shell: bash -el {0}
+ timeout-minutes: 90
+ strategy:
+ matrix:
+ env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
+ fail-fast: false
+ concurrency:
+ # https://github.community/t/concurrecy-not-work-for-push/183068/7
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows
+ cancel-in-progress: true
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Install Dependencies
+ uses: conda-incubator/setup-miniconda@v2.1.1
+ with:
+ mamba-version: "*"
+ channels: conda-forge
+ activate-environment: pandas-dev
+ channel-priority: strict
+ environment-file: ci/deps/${{ matrix.env_file }}
+ use-only-tar-bz2: true
+
+ - name: Build Pandas
+ uses: ./.github/actions/build_pandas
+
+ - name: Test
+ run: ci/run_tests.sh
+
+ - name: Build Version
+ run: conda list
+
+ - name: Publish test results
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test results
+ path: test-data.xml
+ if: failure()
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v2
+ with:
+ flags: unittests
+ name: codecov-pandas
+ fail_ci_if_error: false
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index f7c97f0554e0e..b2ae620019962 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -27,11 +27,6 @@ jobs:
name: macOS
vmImage: macOS-10.15
-- template: ci/azure/windows.yml
- parameters:
- name: Windows
- vmImage: windows-2019
-
- job: py38_32bit
pool:
vmImage: ubuntu-18.04
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
deleted file mode 100644
index 02c6564579aa2..0000000000000
--- a/ci/azure/windows.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-parameters:
- name: ''
- vmImage: ''
-
-jobs:
-- job: ${{ parameters.name }}
- timeoutInMinutes: 90
- pool:
- vmImage: ${{ parameters.vmImage }}
- strategy:
- matrix:
- py38:
- ENV_FILE: ci/deps/actions-38.yaml
- CONDA_PY: "38"
-
- py39:
- ENV_FILE: ci/deps/actions-39.yaml
- CONDA_PY: "39"
-
- py310:
- ENV_FILE: ci/deps/actions-310.yaml
- CONDA_PY: "310"
-
- steps:
- - powershell: |
- Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
- Write-Host "##vso[task.prependpath]$HOME/miniconda3/bin"
- displayName: 'Add conda to PATH'
- - bash: conda install -yv -c conda-forge -n base 'mamba>=0.21.2'
- displayName: 'Install mamba'
-
- - bash: |
- # See https://github.com/mamba-org/mamba/issues/1370
- # See https://github.com/mamba-org/mamba/issues/633
- C:\\Miniconda\\condabin\\mamba.bat create -n pandas-dev
- C:\\Miniconda\\condabin\\mamba.bat env update -n pandas-dev --file ci\\deps\\actions-$(CONDA_PY).yaml
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- C:\\Miniconda\\condabin\\mamba.bat install -n pandas-dev 'setuptools<60'
- C:\\Miniconda\\condabin\\mamba.bat list -n pandas-dev
- displayName: 'Create anaconda environment'
- - bash: |
- source activate pandas-dev
- conda list
- python setup.py build_ext -q -j 2
- python -m pip install --no-build-isolation -e .
- displayName: 'Build'
- - bash: |
- source activate pandas-dev
- wmic.exe cpu get caption, deviceid, name, numberofcores, maxclockspeed
- ci/run_tests.sh
- displayName: 'Test'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- failTaskOnFailedTests: true
- testResultsFiles: 'test-data.xml'
- testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
- displayName: 'Publish test results'
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
Took parts from https://github.com/pandas-dev/pandas/pull/46611
| https://api.github.com/repos/pandas-dev/pandas/pulls/46960 | 2022-05-07T03:54:35Z | 2022-05-08T22:19:03Z | 2022-05-08T22:19:03Z | 2022-05-08T22:22:00Z |
ENH: Timestamp.month_name, day_name support non-nano | diff --git a/asv_bench/benchmarks/tslibs/fields.py b/asv_bench/benchmarks/tslibs/fields.py
index 203afcdaa7378..23ae73811204c 100644
--- a/asv_bench/benchmarks/tslibs/fields.py
+++ b/asv_bench/benchmarks/tslibs/fields.py
@@ -66,9 +66,9 @@ class TimeGetStartEndField:
def setup(self, size, side, period, freqstr, month_kw):
arr = np.random.randint(0, 10, size=size, dtype="i8")
- self.dt64data = arr.view("M8[ns]")
+ self.i8data = arr
self.attrname = f"is_{period}_{side}"
def time_get_start_end_field(self, size, side, period, freqstr, month_kw):
- get_start_end_field(self.dt64data, self.attrname, freqstr, month_kw=month_kw)
+ get_start_end_field(self.i8data, self.attrname, freqstr, month_kw=month_kw)
diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi
index 228f7dbdf5eac..e404eadf13657 100644
--- a/pandas/_libs/tslibs/fields.pyi
+++ b/pandas/_libs/tslibs/fields.pyi
@@ -10,12 +10,14 @@ def get_date_name_field(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
field: str,
locale: str | None = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.object_]: ...
def get_start_end_field(
- dt64values: npt.NDArray[np.datetime64],
+ dtindex: npt.NDArray[np.int64],
field: str,
freqstr: str | None = ...,
month_kw: int = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.bool_]: ...
def get_date_field(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index e8980dc1a7553..57d4c27b3337d 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -44,6 +44,7 @@ from pandas._libs.tslibs.ccalendar cimport (
from pandas._libs.tslibs.nattype cimport NPY_NAT
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
+ NPY_FR_ns,
dt64_to_dtstruct,
get_unit_from_dtype,
npy_datetimestruct,
@@ -139,13 +140,18 @@ def month_position_check(fields, weekdays) -> str | None:
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None):
+def get_date_name_field(
+ const int64_t[:] dtindex,
+ str field,
+ object locale=None,
+ NPY_DATETIMEUNIT reso=NPY_FR_ns,
+):
"""
Given a int64-based datetime index, return array of strings of date
name based on requested field (e.g. day_name)
"""
cdef:
- Py_ssize_t i, count = len(dtindex)
+ Py_ssize_t i, count = dtindex.shape[0]
ndarray[object] out, names
npy_datetimestruct dts
int dow
@@ -163,7 +169,7 @@ def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None)
out[i] = np.nan
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
dow = dayofweek(dts.year, dts.month, dts.day)
out[i] = names[dow].capitalize()
@@ -178,7 +184,7 @@ def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None)
out[i] = np.nan
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = names[dts.month].capitalize()
else:
@@ -201,8 +207,13 @@ cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil:
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_start_end_field(ndarray dt64values, str field,
- str freqstr=None, int month_kw=12):
+def get_start_end_field(
+ const int64_t[:] dtindex,
+ str field,
+ str freqstr=None,
+ int month_kw=12,
+ NPY_DATETIMEUNIT reso=NPY_FR_ns,
+):
"""
Given an int64-based datetime index return array of indicators
of whether timestamps are at the start/end of the month/quarter/year
@@ -210,10 +221,11 @@ def get_start_end_field(ndarray dt64values, str field,
Parameters
----------
- dt64values : ndarray[datetime64], any resolution
+ dtindex : ndarray[int64]
field : str
frestr : str or None, default None
month_kw : int, default 12
+ reso : NPY_DATETIMEUNIT, default NPY_FR_ns
Returns
-------
@@ -221,15 +233,13 @@ def get_start_end_field(ndarray dt64values, str field,
"""
cdef:
Py_ssize_t i
- int count = dt64values.size
+ int count = dtindex.shape[0]
bint is_business = 0
int end_month = 12
int start_month = 1
ndarray[int8_t] out
npy_datetimestruct dts
int compare_month, modby
- ndarray dtindex = dt64values.view("i8")
- NPY_DATETIMEUNIT reso = get_unit_from_dtype(dt64values.dtype)
out = np.zeros(count, dtype='int8')
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index e7ac855d6a832..923d1f830e1a9 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -487,7 +487,6 @@ cdef class _Timestamp(ABCTimestamp):
dict kwds
ndarray[uint8_t, cast=True] out
int month_kw
- str unit
if freq:
kwds = freq.kwds
@@ -499,9 +498,8 @@ cdef class _Timestamp(ABCTimestamp):
val = self._maybe_convert_value_to_local()
- unit = npy_unit_to_abbrev(self._reso)
- out = get_start_end_field(np.array([val], dtype=f"M8[{unit}]"),
- field, freqstr, month_kw)
+ out = get_start_end_field(np.array([val], dtype=np.int64),
+ field, freqstr, month_kw, self._reso)
return out[0]
cdef _warn_on_field_deprecation(self, freq, str field):
@@ -661,12 +659,10 @@ cdef class _Timestamp(ABCTimestamp):
int64_t val
object[::1] out
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
-
val = self._maybe_convert_value_to_local()
+
out = get_date_name_field(np.array([val], dtype=np.int64),
- field, locale=locale)
+ field, locale=locale, reso=self._reso)
return out[0]
def day_name(self, locale=None) -> str:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 7fef934a85626..6f984727f4f6d 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -38,11 +38,13 @@
tz_convert_from_utc,
tzconversion,
)
+from pandas._libs.tslibs.np_datetime import py_get_unit_from_dtype
from pandas._typing import npt
from pandas.errors import (
OutOfBoundsDatetime,
PerformanceWarning,
)
+from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_inclusive
@@ -131,7 +133,7 @@ def f(self):
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
- values.view(self._ndarray.dtype), field, self.freqstr, month_kw
+ values, field, self.freqstr, month_kw, reso=self._reso
)
else:
result = fields.get_date_field(values, field)
@@ -140,7 +142,7 @@ def f(self):
return result
if field in self._object_ops:
- result = fields.get_date_name_field(values, field)
+ result = fields.get_date_name_field(values, field, reso=self._reso)
result = self._maybe_mask_results(result, fill_value=None)
else:
@@ -544,6 +546,10 @@ def _check_compatible_with(self, other, setitem: bool = False):
# -----------------------------------------------------------------
# Descriptive Properties
+ @cache_readonly
+ def _reso(self):
+ return py_get_unit_from_dtype(self._ndarray.dtype)
+
def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
# GH#42228
value = x.view("i8")
@@ -1270,7 +1276,9 @@ def month_name(self, locale=None):
"""
values = self._local_timestamps()
- result = fields.get_date_name_field(values, "month_name", locale=locale)
+ result = fields.get_date_name_field(
+ values, "month_name", locale=locale, reso=self._reso
+ )
result = self._maybe_mask_results(result, fill_value=None)
return result
@@ -1313,7 +1321,9 @@ def day_name(self, locale=None):
"""
values = self._local_timestamps()
- result = fields.get_date_name_field(values, "day_name", locale=locale)
+ result = fields.get_date_name_field(
+ values, "day_name", locale=locale, reso=self._reso
+ )
result = self._maybe_mask_results(result, fill_value=None)
return result
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index bc9e6c0131646..c892816629462 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -753,6 +753,14 @@ def test_start_end_fields(self, ts):
assert not ts.is_month_end
assert not ts.is_month_end
+ def test_day_name(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.day_name() == alt.day_name()
+
+ def test_month_name(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.month_name() == alt.month_name()
+
def test_repr(self, dt64, ts):
alt = Timestamp(dt64)
diff --git a/pandas/tests/tslibs/test_fields.py b/pandas/tests/tslibs/test_fields.py
index 528d08d7f499b..9e6464f7727bd 100644
--- a/pandas/tests/tslibs/test_fields.py
+++ b/pandas/tests/tslibs/test_fields.py
@@ -28,10 +28,7 @@ def test_get_date_field_readonly(dtindex):
def test_get_start_end_field_readonly(dtindex):
- dt64values = dtindex.view("M8[ns]")
- dt64values.flags.writeable = False
-
- result = fields.get_start_end_field(dt64values, "is_month_start", None)
+ result = fields.get_start_end_field(dtindex, "is_month_start", None)
expected = np.array([True, False, False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
| Should also fix an asv that currently fails when run on e.g. v1.4.0 | https://api.github.com/repos/pandas-dev/pandas/pulls/46959 | 2022-05-06T22:19:01Z | 2022-05-07T02:28:13Z | 2022-05-07T02:28:13Z | 2022-05-07T16:41:02Z |
API: New copy / view semantics using Copy-on-Write | diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index c4398efb12c3d..b8268a82d9b70 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -52,6 +52,10 @@ jobs:
extra_apt: "language-pack-zh-hans"
lang: "zh_CN.utf8"
lc_all: "zh_CN.utf8"
+ - name: "Copy-on-Write"
+ env_file: actions-310.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_copy_on_write: "1"
- name: "Data Manager"
env_file: actions-38.yaml
pattern: "not slow and not network and not single_cpu"
@@ -84,6 +88,7 @@ jobs:
LC_ALL: ${{ matrix.lc_all || '' }}
PANDAS_TESTING_MODE: ${{ matrix.pandas_testing_mode || '' }}
PANDAS_DATA_MANAGER: ${{ matrix.pandas_data_manager || 'block' }}
+ PANDAS_COPY_ON_WRITE: ${{ matrix.pandas_copy_on_write || '0' }}
TEST_ARGS: ${{ matrix.test_args || '' }}
PYTEST_WORKERS: ${{ contains(matrix.pattern, 'not single_cpu') && 'auto' || '1' }}
PYTEST_TARGET: ${{ matrix.pytest_target || 'pandas' }}
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 159fdbc080fb4..94ae4a021da4d 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -1,4 +1,5 @@
from collections import defaultdict
+import weakref
cimport cython
from cpython.slice cimport PySlice_GetIndicesEx
@@ -674,8 +675,9 @@ cdef class BlockManager:
public list axes
public bint _known_consolidated, _is_consolidated
public ndarray _blknos, _blklocs
+ public list refs
- def __cinit__(self, blocks=None, axes=None, verify_integrity=True):
+ def __cinit__(self, blocks=None, axes=None, refs=None, verify_integrity=True):
# None as defaults for unpickling GH#42345
if blocks is None:
# This adds 1-2 microseconds to DataFrame(np.array([]))
@@ -687,6 +689,7 @@ cdef class BlockManager:
self.blocks = blocks
self.axes = axes.copy() # copy to make sure we are not remotely-mutable
+ self.refs = refs
# Populate known_consolidate, blknos, and blklocs lazily
self._known_consolidated = False
@@ -795,12 +798,14 @@ cdef class BlockManager:
ndarray blknos, blklocs
nbs = []
+ nrefs = []
for blk in self.blocks:
nb = blk.getitem_block_index(slobj)
nbs.append(nb)
+ nrefs.append(weakref.ref(blk))
new_axes = [self.axes[0], self.axes[1]._getitem_slice(slobj)]
- mgr = type(self)(tuple(nbs), new_axes, verify_integrity=False)
+ mgr = type(self)(tuple(nbs), new_axes, nrefs, verify_integrity=False)
# We can avoid having to rebuild blklocs/blknos
blklocs = self._blklocs
@@ -813,7 +818,7 @@ cdef class BlockManager:
def get_slice(self, slobj: slice, axis: int = 0) -> BlockManager:
if axis == 0:
- new_blocks = self._slice_take_blocks_ax0(slobj)
+ new_blocks, new_refs = self._slice_take_blocks_ax0(slobj)
elif axis == 1:
return self._get_index_slice(slobj)
else:
@@ -822,4 +827,4 @@ cdef class BlockManager:
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
- return type(self)(tuple(new_blocks), new_axes, verify_integrity=False)
+ return type(self)(tuple(new_blocks), new_axes, new_refs, verify_integrity=False)
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index c8db82500d0d6..813e8de72f96e 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -224,7 +224,7 @@ def load_newobj(self):
arr = np.array([], dtype="m8[ns]")
obj = cls.__new__(cls, arr, arr.dtype)
elif cls is BlockManager and not args:
- obj = cls.__new__(cls, (), [], False)
+ obj = cls.__new__(cls, (), [], None, False)
else:
obj = cls.__new__(cls, *args)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 54c24b4c0b58a..e3909953d8a58 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -29,10 +29,7 @@
from decimal import Decimal
import operator
import os
-from typing import (
- Callable,
- Literal,
-)
+from typing import Callable
from dateutil.tz import (
tzlocal,
@@ -1844,8 +1841,8 @@ def using_array_manager():
@pytest.fixture
-def using_copy_on_write() -> Literal[False]:
+def using_copy_on_write() -> bool:
"""
Fixture to check if Copy-on-Write is enabled.
"""
- return False
+ return pd.options.mode.copy_on_write and pd.options.mode.data_manager == "block"
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 4434ed5a8b5f7..2579f736a9703 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -540,6 +540,26 @@ def use_inf_as_na_cb(key) -> None:
)
+# TODO better name?
+copy_on_write_doc = """
+: bool
+ Use new copy-view behaviour using Copy-on-Write. Defaults to False,
+ unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable
+ (if set to "1" for True, needs to be set before pandas is imported).
+"""
+
+
+with cf.config_prefix("mode"):
+ cf.register_option(
+ "copy_on_write",
+ # Get the default from an environment variable, if set, otherwise defaults
+ # to False. This environment variable can be set for testing.
+ os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1",
+ copy_on_write_doc,
+ validator=is_bool,
+ )
+
+
# user warnings
chained_assignment = """
: string
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 53fddb032a487..b574dee081c06 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3708,6 +3708,9 @@ def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
+
+ Warning! The returned array is a view but doesn't handle Copy-on-Write,
+ so this should be used with caution (for read-only purposes).
"""
return self._mgr.iget_values(i)
@@ -3715,6 +3718,9 @@ def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
+
+ Warning! The returned array is a view but doesn't handle Copy-on-Write,
+ so this should be used with caution (for read-only purposes).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
@@ -5145,7 +5151,7 @@ def set_axis(
"labels",
[
("method", None),
- ("copy", True),
+ ("copy", None),
("level", None),
("fill_value", np.nan),
("limit", None),
@@ -5370,7 +5376,7 @@ def rename(
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
- copy: bool = ...,
+ copy: bool | None = ...,
inplace: Literal[True],
level: Level = ...,
errors: IgnoreRaise = ...,
@@ -5385,7 +5391,7 @@ def rename(
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
- copy: bool = ...,
+ copy: bool | None = ...,
inplace: Literal[False] = ...,
level: Level = ...,
errors: IgnoreRaise = ...,
@@ -5400,7 +5406,7 @@ def rename(
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
- copy: bool = ...,
+ copy: bool | None = ...,
inplace: bool = ...,
level: Level = ...,
errors: IgnoreRaise = ...,
@@ -5414,7 +5420,7 @@ def rename(
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
- copy: bool = True,
+ copy: bool | None = None,
inplace: bool = False,
level: Level = None,
errors: IgnoreRaise = "ignore",
@@ -6288,7 +6294,7 @@ class max type
if inplace:
new_obj = self
else:
- new_obj = self.copy()
+ new_obj = self.copy(deep=None)
if allow_duplicates is not lib.no_default:
allow_duplicates = validate_bool_kwarg(allow_duplicates, "allow_duplicates")
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7ed6e0d84445c..6ab0b03f570c1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1034,7 +1034,7 @@ def _rename(
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
- copy: bool_t = True,
+ copy: bool_t | None = None,
inplace: bool_t = False,
level: Level | None = None,
errors: str = "ignore",
@@ -4145,6 +4145,12 @@ def _check_setitem_copy(self, t="setting", force=False):
df.iloc[0:5]['group'] = 'a'
"""
+ if (
+ config.get_option("mode.copy_on_write")
+ and config.get_option("mode.data_manager") == "block"
+ ):
+ return
+
# return early if the check is not needed
if not (force or self._is_copy):
return
@@ -5245,7 +5251,7 @@ def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT:
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
- copy = kwargs.pop("copy", True)
+ copy = kwargs.pop("copy", None)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
@@ -5270,9 +5276,7 @@ def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT:
for axis, ax in axes.items()
if ax is not None
):
- if copy:
- return self.copy()
- return self
+ return self.copy(deep=copy)
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
@@ -6249,7 +6253,7 @@ def astype(
return cast(NDFrameT, result)
@final
- def copy(self: NDFrameT, deep: bool_t = True) -> NDFrameT:
+ def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT:
"""
Make a copy of this object's indices and data.
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 88f81064b826f..dcf69dfda1ae8 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -527,6 +527,11 @@ def copy(self: T, deep=True) -> T:
-------
BlockManager
"""
+ if deep is None:
+ # ArrayManager does not yet support CoW, so deep=None always means
+ # deep=True for now
+ deep = True
+
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
@@ -591,6 +596,11 @@ def _reindex_indexer(
pandas-indexer with -1's only.
"""
+ if copy is None:
+ # ArrayManager does not yet support CoW, so deep=None always means
+ # deep=True for now
+ copy = True
+
if indexer is None:
if new_axis is self._axes[axis] and not copy:
return self
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 3e27cf0b15511..522fe3cb192ec 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -839,10 +839,13 @@ def _slice(
return self.values[slicer]
- def set_inplace(self, locs, values: ArrayLike) -> None:
+ def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None:
"""
Modify block values in-place with new item value.
+ If copy=True, first copy the underlying values in place before modifying
+ (for Copy-on-Write).
+
Notes
-----
`set_inplace` never creates a new array or new Block, whereas `setitem`
@@ -850,6 +853,8 @@ def set_inplace(self, locs, values: ArrayLike) -> None:
Caller is responsible for checking values.dtype == self.dtype.
"""
+ if copy:
+ self.values = self.values.copy()
self.values[locs] = values
def take_nd(
@@ -1665,9 +1670,11 @@ def iget(self, i: int | tuple[int, int] | tuple[slice, int]):
raise IndexError(f"{self} only contains one item")
return self.values
- def set_inplace(self, locs, values: ArrayLike) -> None:
+ def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None:
# When an ndarray, we should have locs.tolist() == [0]
# When a BlockPlacement we should have list(locs) == [0]
+ if copy:
+ self.values = self.values.copy()
self.values[:] = values
def _maybe_squeeze_arg(self, arg):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 4e84b013b2a11..3084bcea49f05 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -12,9 +12,12 @@
cast,
)
import warnings
+import weakref
import numpy as np
+from pandas._config import get_option
+
from pandas._libs import (
algos as libalgos,
internals as libinternals,
@@ -143,6 +146,7 @@ class BaseBlockManager(DataManager):
_blklocs: npt.NDArray[np.intp]
blocks: tuple[Block, ...]
axes: list[Index]
+ refs: list[weakref.ref | None] | None
@property
def ndim(self) -> int:
@@ -151,11 +155,16 @@ def ndim(self) -> int:
_known_consolidated: bool
_is_consolidated: bool
- def __init__(self, blocks, axes, verify_integrity: bool = True) -> None:
+ def __init__(self, blocks, axes, refs=None, verify_integrity: bool = True) -> None:
raise NotImplementedError
@classmethod
- def from_blocks(cls: type_t[T], blocks: list[Block], axes: list[Index]) -> T:
+ def from_blocks(
+ cls: type_t[T],
+ blocks: list[Block],
+ axes: list[Index],
+ refs: list[weakref.ref | None] | None = None,
+ ) -> T:
raise NotImplementedError
@property
@@ -228,6 +237,33 @@ def is_single_block(self) -> bool:
def items(self) -> Index:
return self.axes[0]
+ def _has_no_reference(self, i: int) -> bool:
+ """
+ Check for column `i` if it has references.
+ (whether it references another array or is itself being referenced)
+ Returns True if the column has no references.
+ """
+ blkno = self.blknos[i]
+ return self._has_no_reference_block(blkno)
+
+ def _has_no_reference_block(self, blkno: int) -> bool:
+ """
+ Check for block `i` if it has references.
+ (whether it references another array or is itself being referenced)
+ Returns True if the block has no references.
+ """
+ # TODO(CoW) include `or self.refs[blkno]() is None` ?
+ return (
+ self.refs is None or self.refs[blkno] is None
+ ) and weakref.getweakrefcount(self.blocks[blkno]) == 0
+
+ def _clear_reference_block(self, blkno: int) -> None:
+ """
+ Clear any reference for column `i`.
+ """
+ if self.refs is not None:
+ self.refs[blkno] = None
+
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return dtypes.take(self.blknos)
@@ -240,6 +276,9 @@ def arrays(self) -> list[ArrayLike]:
Only for compatibility with ArrayManager for testing convenience.
Not to be used in actual code, and return value is not the same as the
ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs).
+
+ Warning! The returned arrays don't handle Copy-on-Write, so this should
+ be used with caution (only in read-mode).
"""
return [blk.values for blk in self.blocks]
@@ -342,9 +381,23 @@ def setitem(self: T, indexer, value) -> T:
if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
raise ValueError(f"Cannot set values with ndim > {self.ndim}")
+ if _using_copy_on_write() and not self._has_no_reference(0):
+ # if being referenced -> perform Copy-on-Write and clear the reference
+ # this method is only called if there is a single block -> hardcoded 0
+ self = self.copy()
+
return self.apply("setitem", indexer=indexer, value=value)
def putmask(self, mask, new, align: bool = True):
+ if (
+ _using_copy_on_write()
+ and self.refs is not None
+ and not all(ref is None for ref in self.refs)
+ ):
+ # some reference -> copy full dataframe
+ # TODO(CoW) this could be optimized to only copy the blocks that would
+ # get modified
+ self = self.copy()
if align:
align_keys = ["new", "mask"]
@@ -378,6 +431,12 @@ def fillna(self: T, value, limit, inplace: bool, downcast) -> T:
if limit is not None:
# Do this validation even if we go through one of the no-op paths
limit = libalgos.validate_limit(None, limit=limit)
+ if inplace:
+ # TODO(CoW) can be optimized to only copy those blocks that have refs
+ if _using_copy_on_write() and any(
+ not self._has_no_reference_block(i) for i in range(len(self.blocks))
+ ):
+ self = self.copy()
return self.apply(
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
@@ -527,17 +586,24 @@ def _combine(
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks: list[Block] = []
+ # TODO(CoW) we could optimize here if we know that the passed blocks
+ # are fully "owned" (eg created from an operation, not coming from
+ # an existing manager)
+ new_refs: list[weakref.ref | None] | None = None if copy else []
for b in blocks:
- b = b.copy(deep=copy)
- b.mgr_locs = BlockPlacement(inv_indexer[b.mgr_locs.indexer])
- new_blocks.append(b)
+ nb = b.copy(deep=copy)
+ nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer])
+ new_blocks.append(nb)
+ if not copy:
+ # None has no attribute "append"
+ new_refs.append(weakref.ref(b)) # type: ignore[union-attr]
axes = list(self.axes)
if index is not None:
axes[-1] = index
axes[0] = self.items.take(indexer)
- return type(self).from_blocks(new_blocks, axes)
+ return type(self).from_blocks(new_blocks, axes, new_refs)
@property
def nblocks(self) -> int:
@@ -549,14 +615,22 @@ def copy(self: T, deep=True) -> T:
Parameters
----------
- deep : bool or string, default True
- If False, return shallow copy (do not copy data)
+ deep : bool, string or None, default True
+ If False or None, return a shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
BlockManager
"""
+ if deep is None:
+ if _using_copy_on_write():
+ # use shallow copy
+ deep = False
+ else:
+ # preserve deep copy for BlockManager with copy=None
+ deep = True
+
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
@@ -569,8 +643,14 @@ def copy_func(ax):
new_axes = list(self.axes)
res = self.apply("copy", deep=deep)
+ new_refs: list[weakref.ref | None] | None
+ if deep:
+ new_refs = None
+ else:
+ new_refs = [weakref.ref(blk) for blk in self.blocks]
res.axes = new_axes
+ res.refs = new_refs
if self.ndim > 1:
# Avoid needing to re-compute these
@@ -594,7 +674,7 @@ def consolidate(self: T) -> T:
if self.is_consolidated():
return self
- bm = type(self)(self.blocks, self.axes, verify_integrity=False)
+ bm = type(self)(self.blocks, self.axes, self.refs, verify_integrity=False)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
@@ -606,7 +686,7 @@ def reindex_indexer(
axis: int,
fill_value=None,
allow_dups: bool = False,
- copy: bool = True,
+ copy: bool | None = True,
only_slice: bool = False,
*,
use_na_proxy: bool = False,
@@ -619,7 +699,8 @@ def reindex_indexer(
axis : int
fill_value : object, default None
allow_dups : bool, default False
- copy : bool, default True
+ copy : bool or None, default True
+ If None, regard as False to get shallow copy.
only_slice : bool, default False
Whether to take views, not copies, along columns.
use_na_proxy : bool, default False
@@ -627,6 +708,14 @@ def reindex_indexer(
pandas-indexer with -1's only.
"""
+ if copy is None:
+ if _using_copy_on_write():
+ # use shallow copy
+ copy = False
+ else:
+ # preserve deep copy for BlockManager with copy=None
+ copy = True
+
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
@@ -644,7 +733,7 @@ def reindex_indexer(
raise IndexError("Requested axis not found in manager")
if axis == 0:
- new_blocks = self._slice_take_blocks_ax0(
+ new_blocks, new_refs = self._slice_take_blocks_ax0(
indexer,
fill_value=fill_value,
only_slice=only_slice,
@@ -661,11 +750,12 @@ def reindex_indexer(
)
for blk in self.blocks
]
+ new_refs = None
new_axes = list(self.axes)
new_axes[axis] = new_axis
- new_mgr = type(self).from_blocks(new_blocks, new_axes)
+ new_mgr = type(self).from_blocks(new_blocks, new_axes, new_refs)
if axis == 1:
# We can avoid the need to rebuild these
new_mgr._blknos = self.blknos.copy()
@@ -679,7 +769,7 @@ def _slice_take_blocks_ax0(
only_slice: bool = False,
*,
use_na_proxy: bool = False,
- ) -> list[Block]:
+ ) -> tuple[list[Block], list[weakref.ref | None]]:
"""
Slice/take blocks along axis=0.
@@ -712,9 +802,11 @@ def _slice_take_blocks_ax0(
# GH#32959 EABlock would fail since we can't make 0-width
# TODO(EA2D): special casing unnecessary with 2D EAs
if sllen == 0:
- return []
+ return [], []
bp = BlockPlacement(slice(0, sllen))
- return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]
+ return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)], [
+ weakref.ref(blk)
+ ]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_value is None:
fill_value = blk.fill_value
@@ -730,7 +822,7 @@ def _slice_take_blocks_ax0(
]
# We have
# all(np.shares_memory(nb.values, blk.values) for nb in blocks)
- return blocks
+ return blocks, [weakref.ref(blk)] * len(blocks)
else:
bp = BlockPlacement(slice(0, sllen))
return [
@@ -740,7 +832,7 @@ def _slice_take_blocks_ax0(
new_mgr_locs=bp,
fill_value=fill_value,
)
- ]
+ ], [None]
if sl_type == "slice":
blknos = self.blknos[slobj]
@@ -756,6 +848,7 @@ def _slice_take_blocks_ax0(
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
blocks = []
+ refs: list[weakref.ref | None] = []
group = not only_slice
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
if blkno == -1:
@@ -768,6 +861,7 @@ def _slice_take_blocks_ax0(
use_na_proxy=use_na_proxy,
)
)
+ refs.append(None)
else:
blk = self.blocks[blkno]
@@ -781,18 +875,20 @@ def _slice_take_blocks_ax0(
newblk = blk.copy(deep=False)
newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
blocks.append(newblk)
+ refs.append(weakref.ref(blk))
else:
# GH#32779 to avoid the performance penalty of copying,
# we may try to only slice
taker = blklocs[mgr_locs.indexer]
max_len = max(len(mgr_locs), taker.max() + 1)
- if only_slice:
+ if only_slice or _using_copy_on_write():
taker = lib.maybe_indices_to_slice(taker, max_len)
if isinstance(taker, slice):
nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)
blocks.append(nb)
+ refs.append(weakref.ref(blk))
elif only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
@@ -802,11 +898,13 @@ def _slice_take_blocks_ax0(
nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)
# We have np.shares_memory(nb.values, blk.values)
blocks.append(nb)
+ refs.append(weakref.ref(blk))
else:
nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
blocks.append(nb)
+ refs.append(None)
- return blocks
+ return blocks, refs
def _make_na_block(
self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False
@@ -873,6 +971,7 @@ def take(
indexer=indexer,
axis=axis,
allow_dups=True,
+ copy=None,
)
@@ -890,6 +989,7 @@ def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
+ refs: list[weakref.ref | None] | None = None,
verify_integrity: bool = True,
) -> None:
@@ -939,13 +1039,26 @@ def _verify_integrity(self) -> None:
f"block items\n# manager items: {len(self.items)}, # "
f"tot_items: {tot_items}"
)
+ if self.refs is not None:
+ if len(self.refs) != len(self.blocks):
+ raise AssertionError(
+ "Number of passed refs must equal the number of blocks: "
+ f"{len(self.refs)} refs vs {len(self.blocks)} blocks."
+ "\nIf you see this error, please report a bug at "
+ "https://github.com/pandas-dev/pandas/issues"
+ )
@classmethod
- def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager:
+ def from_blocks(
+ cls,
+ blocks: list[Block],
+ axes: list[Index],
+ refs: list[weakref.ref | None] | None = None,
+ ) -> BlockManager:
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
- return cls(blocks, axes, verify_integrity=False)
+ return cls(blocks, axes, refs, verify_integrity=False)
# ----------------------------------------------------------------
# Indexing
@@ -965,7 +1078,9 @@ def fast_xs(self, loc: int) -> SingleBlockManager:
if len(self.blocks) == 1:
result = self.blocks[0].iget((slice(None), loc))
block = new_block(result, placement=slice(0, len(result)), ndim=1)
- return SingleBlockManager(block, self.axes[0])
+ # in the case of a single block, the new block is a view
+ ref = weakref.ref(self.blocks[0])
+ return SingleBlockManager(block, self.axes[0], [ref])
dtype = interleaved_dtype([blk.dtype for blk in self.blocks])
@@ -996,12 +1111,16 @@ def iget(self, i: int) -> SingleBlockManager:
# shortcut for select a single-dim from a 2-dim BM
bp = BlockPlacement(slice(0, len(values)))
nb = type(block)(values, placement=bp, ndim=1)
- return SingleBlockManager(nb, self.axes[1])
+ return SingleBlockManager(nb, self.axes[1], [weakref.ref(block)])
def iget_values(self, i: int) -> ArrayLike:
"""
Return the data for column i as the values (ndarray or ExtensionArray).
+
+ Warning! The returned array is a view but doesn't handle Copy-on-Write,
+ so this should be used with caution.
"""
+ # TODO(CoW) making the arrays read-only might make this safer to use?
block = self.blocks[self.blknos[i]]
values = block.iget(self.blklocs[i])
return values
@@ -1011,6 +1130,9 @@ def column_arrays(self) -> list[np.ndarray]:
"""
Used in the JSON C code to access column arrays.
This optimizes compared to using `iget_values` by converting each
+
+ Warning! This doesn't handle Copy-on-Write, so should be used with
+ caution (current use case of consuming this in the JSON code is fine).
"""
# This is an optimized equivalent to
# result = [self.iget_values(i) for i in range(len(self.items))]
@@ -1102,7 +1224,12 @@ def value_getitem(placement):
blk = self.blocks[blkno_l]
blk_locs = blklocs[val_locs.indexer]
if inplace and blk.should_store(value):
- blk.set_inplace(blk_locs, value_getitem(val_locs))
+ # Updating inplace -> check if we need to do Copy-on-Write
+ if _using_copy_on_write() and not self._has_no_reference_block(blkno_l):
+ blk.set_inplace(blk_locs, value_getitem(val_locs), copy=True)
+ self._clear_reference_block(blkno_l)
+ else:
+ blk.set_inplace(blk_locs, value_getitem(val_locs))
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
@@ -1117,9 +1244,11 @@ def value_getitem(placement):
)
self.blocks = blocks_tup
self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
+ # blk.delete gives a copy, so we can remove a possible reference
+ self._clear_reference_block(blkno_l)
if len(removed_blknos):
- # Remove blocks & update blknos accordingly
+ # Remove blocks & update blknos and refs accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
@@ -1130,6 +1259,12 @@ def value_getitem(placement):
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
+ if self.refs is not None:
+ self.refs = [
+ ref
+ for i, ref in enumerate(self.refs)
+ if i not in set(removed_blknos)
+ ]
if unfit_val_locs:
unfit_idxr = np.concatenate(unfit_mgr_locs)
@@ -1166,6 +1301,10 @@ def value_getitem(placement):
self._blklocs[unfit_idxr] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
+ # TODO(CoW) is this always correct to assume that the new_blocks
+ # are not referencing anything else?
+ if self.refs is not None:
+ self.refs = list(self.refs) + [None] * len(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
@@ -1183,14 +1322,20 @@ def _iset_single(
# Caller is responsible for verifying value.shape
if inplace and blk.should_store(value):
+ copy = False
+ if _using_copy_on_write() and not self._has_no_reference_block(blkno):
+ # perform Copy-on-Write and clear the reference
+ copy = True
+ self._clear_reference_block(blkno)
iloc = self.blklocs[loc]
- blk.set_inplace(slice(iloc, iloc + 1), value)
+ blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)
return
nb = new_block_2d(value, placement=blk._mgr_locs)
old_blocks = self.blocks
new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :]
self.blocks = new_blocks
+ self._clear_reference_block(blkno)
return
def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value) -> None:
@@ -1200,6 +1345,14 @@ def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value) -> None
This is a method on the BlockManager level, to avoid creating an
intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)
"""
+ if _using_copy_on_write() and not self._has_no_reference(loc):
+ # otherwise perform Copy-on-Write and clear the reference
+ blkno = self.blknos[loc]
+ blocks = list(self.blocks)
+ blocks[blkno] = blocks[blkno].copy()
+ self.blocks = tuple(blocks)
+ self._clear_reference_block(blkno)
+
col_mgr = self.iget(loc)
new_mgr = col_mgr.setitem((idx,), value)
self.iset(loc, new_mgr._block.values, inplace=True)
@@ -1239,6 +1392,9 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
self.axes[0] = new_axis
self.blocks += (block,)
+ # TODO(CoW) do we always "own" the passed `value`?
+ if self.refs is not None:
+ self.refs += [None]
self._known_consolidated = False
@@ -1292,10 +1448,10 @@ def idelete(self, indexer) -> BlockManager:
is_deleted[indexer] = True
taker = (~is_deleted).nonzero()[0]
- nbs = self._slice_take_blocks_ax0(taker, only_slice=True)
+ nbs, new_refs = self._slice_take_blocks_ax0(taker, only_slice=True)
new_columns = self.items[~is_deleted]
axes = [new_columns, self.axes[1]]
- return type(self)(tuple(nbs), axes, verify_integrity=False)
+ return type(self)(tuple(nbs), axes, new_refs, verify_integrity=False)
# ----------------------------------------------------------------
# Block-wise Operation
@@ -1550,6 +1706,7 @@ def as_array(
-------
arr : ndarray
"""
+ # TODO(CoW) handle case where resulting array is a view
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose()
@@ -1674,7 +1831,10 @@ def _consolidate_inplace(self) -> None:
# the DataFrame's _item_cache. The exception is for newly-created
# BlockManager objects not yet attached to a DataFrame.
if not self.is_consolidated():
- self.blocks = tuple(_consolidate(self.blocks))
+ if self.refs is None:
+ self.blocks = _consolidate(self.blocks)
+ else:
+ self.blocks, self.refs = _consolidate_with_refs(self.blocks, self.refs)
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
@@ -1696,6 +1856,7 @@ def __init__(
self,
block: Block,
axis: Index,
+ refs: list[weakref.ref | None] | None = None,
verify_integrity: bool = False,
fastpath=lib.no_default,
) -> None:
@@ -1713,15 +1874,23 @@ def __init__(
self.axes = [axis]
self.blocks = (block,)
+ self.refs = refs
@classmethod
- def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> SingleBlockManager:
+ def from_blocks(
+ cls,
+ blocks: list[Block],
+ axes: list[Index],
+ refs: list[weakref.ref | None] | None = None,
+ ) -> SingleBlockManager:
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
assert len(blocks) == 1
assert len(axes) == 1
- return cls(blocks[0], axes[0], verify_integrity=False)
+ if refs is not None:
+ assert len(refs) == 1
+ return cls(blocks[0], axes[0], refs, verify_integrity=False)
@classmethod
def from_array(cls, array: ArrayLike, index: Index) -> SingleBlockManager:
@@ -1740,7 +1909,18 @@ def to_2d_mgr(self, columns: Index) -> BlockManager:
bp = BlockPlacement(0)
new_blk = type(blk)(arr, placement=bp, ndim=2)
axes = [columns, self.axes[0]]
- return BlockManager([new_blk], axes=axes, verify_integrity=False)
+ refs: list[weakref.ref | None] = [weakref.ref(blk)]
+ return BlockManager([new_blk], axes=axes, refs=refs, verify_integrity=False)
+
+ def _has_no_reference(self, i: int = 0) -> bool:
+ """
+ Check for column `i` if it has references.
+ (whether it references another array or is itself being referenced)
+ Returns True if the column has no references.
+ """
+ return (self.refs is None or self.refs[0] is None) and weakref.getweakrefcount(
+ self.blocks[0]
+ ) == 0
def __getstate__(self):
block_values = [b.values for b in self.blocks]
@@ -1810,7 +1990,9 @@ def getitem_mgr(self, indexer: slice | npt.NDArray[np.bool_]) -> SingleBlockMana
block = type(blk)(array, placement=bp, ndim=1)
new_idx = self.index[indexer]
- return type(self)(block, new_idx)
+ # TODO(CoW) in theory only need to track reference if new_array is a view
+ ref = weakref.ref(blk)
+ return type(self)(block, new_idx, [ref])
def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager:
# Assertion disabled for performance
@@ -1823,7 +2005,7 @@ def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager:
bp = BlockPlacement(slice(0, len(array)))
block = type(blk)(array, placement=bp, ndim=1)
new_index = self.index._getitem_slice(slobj)
- return type(self)(block, new_index)
+ return type(self)(block, new_index, [weakref.ref(blk)])
@property
def index(self) -> Index:
@@ -1850,15 +2032,30 @@ def array_values(self):
def get_numeric_data(self, copy: bool = False):
if self._block.is_numeric:
- if copy:
- return self.copy()
- return self
+ return self.copy(deep=copy)
return self.make_empty()
@property
def _can_hold_na(self) -> bool:
return self._block._can_hold_na
+ def setitem_inplace(self, indexer, value) -> None:
+ """
+ Set values with indexer.
+
+ For Single[Block/Array]Manager, this backs s[indexer] = value
+
+ This is an inplace version of `setitem()`, mutating the manager/values
+ in place, not returning a new Manager (and Block), and thus never changing
+ the dtype.
+ """
+ if _using_copy_on_write() and not self._has_no_reference(0):
+ self.blocks = (self._block.copy(),)
+ self.refs = None
+ self._cache.clear()
+
+ super().setitem_inplace(indexer, value)
+
def idelete(self, indexer) -> SingleBlockManager:
"""
Delete single location from SingleBlockManager.
@@ -1869,6 +2066,8 @@ def idelete(self, indexer) -> SingleBlockManager:
self.blocks = (nb,)
self.axes[0] = self.axes[0].delete(indexer)
self._cache.clear()
+ # clear reference since delete always results in a new array
+ self.refs = None
return self
def fast_xs(self, loc):
@@ -1885,6 +2084,9 @@ def set_values(self, values: ArrayLike):
Use at your own risk! This does not check if the passed values are
valid for the current Block/SingleBlockManager (length, dtype, etc).
"""
+ # TODO(CoW) do we need to handle copy on write here? Currently this is
+ # only used for FrameColumnApply.series_generator (what if apply is
+ # mutating inplace?)
self.blocks[0].values = values
self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))
@@ -2068,7 +2270,7 @@ def _stack_arrays(tuples, dtype: np.dtype):
return stacked, placement
-def _consolidate(blocks: tuple[Block, ...]) -> list[Block]:
+def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]:
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
@@ -2078,19 +2280,44 @@ def _consolidate(blocks: tuple[Block, ...]) -> list[Block]:
new_blocks: list[Block] = []
for (_can_consolidate, dtype), group_blocks in grouper:
- merged_blocks = _merge_blocks(
+ merged_blocks, _ = _merge_blocks(
+ list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
+ )
+ new_blocks = extend_blocks(merged_blocks, new_blocks)
+ return tuple(new_blocks)
+
+
+def _consolidate_with_refs(
+ blocks: tuple[Block, ...], refs
+) -> tuple[tuple[Block, ...], list[weakref.ref | None]]:
+ """
+ Merge blocks having same dtype, exclude non-consolidating blocks, handling
+ refs
+ """
+ gkey = lambda x: x[0]._consolidate_key
+ grouper = itertools.groupby(sorted(zip(blocks, refs), key=gkey), gkey)
+
+ new_blocks: list[Block] = []
+ new_refs: list[weakref.ref | None] = []
+ for (_can_consolidate, dtype), group_blocks_refs in grouper:
+ group_blocks, group_refs = list(zip(*list(group_blocks_refs)))
+ merged_blocks, consolidated = _merge_blocks(
list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
)
new_blocks = extend_blocks(merged_blocks, new_blocks)
- return new_blocks
+ if consolidated:
+ new_refs.extend([None])
+ else:
+ new_refs.extend(group_refs)
+ return tuple(new_blocks), new_refs
def _merge_blocks(
blocks: list[Block], dtype: DtypeObj, can_consolidate: bool
-) -> list[Block]:
+) -> tuple[list[Block], bool]:
if len(blocks) == 1:
- return blocks
+ return blocks, False
if can_consolidate:
@@ -2116,10 +2343,10 @@ def _merge_blocks(
new_mgr_locs = new_mgr_locs[argsort]
bp = BlockPlacement(new_mgr_locs)
- return [new_block_2d(new_values, placement=bp)]
+ return [new_block_2d(new_values, placement=bp)], True
# can't consolidate --> no merge
- return blocks
+ return blocks, False
def _fast_count_smallints(arr: npt.NDArray[np.intp]):
@@ -2152,3 +2379,7 @@ def _preprocess_slice_or_indexer(
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
+
+
+def _using_copy_on_write():
+ return get_option("mode.copy_on_write")
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index 1160d3b2a8e3a..5febb302a9de9 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -36,7 +36,7 @@ def _iter_block_pairs(
left_ea = blk_vals.ndim == 1
- rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
+ rblks, _ = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
# Assertions are disabled for performance, but should hold:
# if left_ea:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f55d6a26255a0..f5e44c732aff8 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1301,7 +1301,16 @@ def _maybe_update_cacher(
# a copy
if ref is None:
del self._cacher
- elif len(self) == len(ref) and self.name in ref.columns:
+ # for CoW, we never want to update the parent DataFrame cache
+ # if the Series changed, and always pop the cached item
+ elif (
+ not (
+ get_option("mode.copy_on_write")
+ and get_option("mode.data_manager") == "block"
+ )
+ and len(self) == len(ref)
+ and self.name in ref.columns
+ ):
# GH#42530 self.name must be in ref.columns
# to ensure column still in dataframe
# otherwise, either self or ref has swapped in new arrays
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 22b8f9d020a5f..a0b9a0d247533 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -341,8 +341,10 @@ def convert_delta_safe(base, deltas, unit) -> Series:
has_bad_values = False
if bad_locs.any():
has_bad_values = True
- data_col = Series(dates)
- data_col[bad_locs] = 1.0 # Replace with NaT
+ # reset cache to avoid SettingWithCopy checks (we own the DataFrame and the
+ # `dates` Series is used to overwrite itself in the DataFrame)
+ dates._reset_cacher()
+ dates[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index ff3abaf819206..d2882f46d25bf 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1440,7 +1440,7 @@ def test_apply_dtype(col):
tm.assert_series_equal(result, expected)
-def test_apply_mutating(using_array_manager):
+def test_apply_mutating(using_array_manager, using_copy_on_write):
# GH#35462 case where applied func pins a new BlockManager to a row
df = DataFrame({"a": range(100), "b": range(100, 200)})
df_orig = df.copy()
@@ -1457,12 +1457,13 @@ def func(row):
result = df.apply(func, axis=1)
tm.assert_frame_equal(result, expected)
- if not using_array_manager:
+ if using_copy_on_write or using_array_manager:
+ # INFO(CoW) With copy on write, mutating a viewing row doesn't mutate the parent
# INFO(ArrayManager) With BlockManager, the row is a view and mutated in place,
# with ArrayManager the row is not a view, and thus not mutated in place
- tm.assert_frame_equal(df, result)
- else:
tm.assert_frame_equal(df, df_orig)
+ else:
+ tm.assert_frame_equal(df, result)
def test_apply_empty_list_reduce():
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 6eb7237c4f41c..d917a3c79aa97 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -150,7 +150,7 @@ def test_subset_column_slice(using_copy_on_write, using_array_manager, dtype):
ids=["slice", "mask", "array"],
)
def test_subset_loc_rows_columns(
- dtype, row_indexer, column_indexer, using_array_manager
+ dtype, row_indexer, column_indexer, using_array_manager, using_copy_on_write
):
# Case: taking a subset of the rows+columns of a DataFrame using .loc
# + afterwards modifying the subset
@@ -177,7 +177,7 @@ def test_subset_loc_rows_columns(
if (
isinstance(row_indexer, slice)
and isinstance(column_indexer, slice)
- and (using_array_manager or dtype == "int64")
+ and (using_array_manager or (dtype == "int64" and not using_copy_on_write))
):
df_orig.iloc[1, 1] = 0
tm.assert_frame_equal(df, df_orig)
@@ -197,7 +197,7 @@ def test_subset_loc_rows_columns(
ids=["slice", "mask", "array"],
)
def test_subset_iloc_rows_columns(
- dtype, row_indexer, column_indexer, using_array_manager
+ dtype, row_indexer, column_indexer, using_array_manager, using_copy_on_write
):
# Case: taking a subset of the rows+columns of a DataFrame using .iloc
# + afterwards modifying the subset
@@ -224,7 +224,7 @@ def test_subset_iloc_rows_columns(
if (
isinstance(row_indexer, slice)
and isinstance(column_indexer, slice)
- and (using_array_manager or dtype == "int64")
+ and (using_array_manager or (dtype == "int64" and not using_copy_on_write))
):
df_orig.iloc[1, 1] = 0
tm.assert_frame_equal(df, df_orig)
diff --git a/pandas/tests/copy_view/test_internals.py b/pandas/tests/copy_view/test_internals.py
new file mode 100644
index 0000000000000..2191fc1b33218
--- /dev/null
+++ b/pandas/tests/copy_view/test_internals.py
@@ -0,0 +1,45 @@
+import numpy as np
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+from pandas.tests.copy_view.util import get_array
+
+
+@td.skip_array_manager_invalid_test
+def test_consolidate(using_copy_on_write):
+
+ # create unconsolidated DataFrame
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+ df["c"] = [4, 5, 6]
+
+ # take a viewing subset
+ subset = df[:]
+
+ # each block of subset references a block of df
+ assert subset._mgr.refs is not None and all(
+ ref is not None for ref in subset._mgr.refs
+ )
+
+ # consolidate the two int64 blocks
+ subset._consolidate_inplace()
+
+ # the float64 block still references the parent one because it is still a view
+ assert subset._mgr.refs[0] is not None
+ # equivalent of assert np.shares_memory(df["b"].values, subset["b"].values)
+ # but avoids caching df["b"]
+ assert np.shares_memory(get_array(df, "b"), get_array(subset, "b"))
+
+ # the new consolidated int64 block does not reference another
+ assert subset._mgr.refs[1] is None
+
+ # the parent dataframe now also only is linked for the float column
+ assert df._mgr._has_no_reference(0)
+ assert not df._mgr._has_no_reference(1)
+ assert df._mgr._has_no_reference(2)
+
+ # and modifying subset still doesn't modify parent
+ if using_copy_on_write:
+ subset.iloc[0, 1] = 0.0
+ assert df._mgr._has_no_reference(1)
+ assert df.loc[0, "b"] == 0.1
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 1ed458e95b78e..cc4c219e6c5d9 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1,6 +1,9 @@
import numpy as np
-from pandas import DataFrame
+from pandas import (
+ DataFrame,
+ Series,
+)
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array
@@ -126,3 +129,47 @@ def test_reindex_columns(using_copy_on_write):
if using_copy_on_write:
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
tm.assert_frame_equal(df, df_orig)
+
+
+def test_select_dtypes(using_copy_on_write):
+ # Case: selecting columns using `select_dtypes()` returns a new dataframe
+ # + afterwards modifying the result
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df.select_dtypes("int64")
+ df2._mgr._verify_integrity()
+
+ # currently this always returns a "view"
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+ # mutating df2 triggers a copy-on-write for that column/block
+ df2.iloc[0, 0] = 0
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # but currently select_dtypes() actually returns a view -> mutates parent
+ df_orig.iloc[0, 0] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_to_frame(using_copy_on_write):
+ # Case: converting a Series to a DataFrame with to_frame
+ ser = Series([1, 2, 3])
+ ser_orig = ser.copy()
+
+ df = ser.to_frame()
+
+ # currently this always returns a "view"
+ assert np.shares_memory(ser.values, get_array(df, 0))
+
+ df.iloc[0, 0] = 0
+
+ if using_copy_on_write:
+ # mutating df triggers a copy-on-write for that column
+ assert not np.shares_memory(ser.values, get_array(df, 0))
+ tm.assert_series_equal(ser, ser_orig)
+ else:
+ # but currently to_frame() actually returns a view -> mutates parent
+ ser_orig.iloc[0] = 0
+ tm.assert_series_equal(ser, ser_orig)
diff --git a/pandas/tests/copy_view/test_setitem.py b/pandas/tests/copy_view/test_setitem.py
index dd42983179806..9e0d350dde0de 100644
--- a/pandas/tests/copy_view/test_setitem.py
+++ b/pandas/tests/copy_view/test_setitem.py
@@ -34,8 +34,9 @@ def test_set_column_with_series(using_copy_on_write):
df["c"] = ser
if using_copy_on_write:
- # with CoW we can delay the copy
- assert np.shares_memory(df["c"].values, ser.values)
+ # TODO(CoW) with CoW we can delay the copy
+ # assert np.shares_memory(df["c"].values, ser.values)
+ assert not np.shares_memory(df["c"].values, ser.values)
else:
# the series data is copied
assert not np.shares_memory(df["c"].values, ser.values)
@@ -78,8 +79,9 @@ def test_set_columns_with_dataframe(using_copy_on_write):
df[["c", "d"]] = df2
if using_copy_on_write:
- # with CoW we can delay the copy
- assert np.shares_memory(df["c"].values, df2["c"].values)
+ # TODO(CoW) with CoW we can delay the copy
+ # assert np.shares_memory(df["c"].values, df2["c"].values)
+ assert not np.shares_memory(df["c"].values, df2["c"].values)
else:
# the data is copied
assert not np.shares_memory(df["c"].values, df2["c"].values)
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 04fa3c11a6c40..775d9c4cbcc45 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -398,6 +398,7 @@ def test_setitem_frame_2d_values(self, data):
# Avoiding using_array_manager fixture
# https://github.com/pandas-dev/pandas/pull/44514#discussion_r754002410
using_array_manager = isinstance(df._mgr, pd.core.internals.ArrayManager)
+ using_copy_on_write = pd.options.mode.copy_on_write
blk_data = df._mgr.arrays[0]
@@ -422,7 +423,7 @@ def test_setitem_frame_2d_values(self, data):
with tm.assert_produces_warning(warn, match=msg):
df.iloc[:] = df.values
self.assert_frame_equal(df, orig)
- if not using_array_manager:
+ if not using_array_manager and not using_copy_on_write:
# GH#33457 Check that this setting occurred in-place
# FIXME(ArrayManager): this should work there too
assert df._mgr.arrays[0] is blk_data
diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py
index 7994c56f8d68b..a98fa52e1009d 100644
--- a/pandas/tests/frame/indexing/test_getitem.py
+++ b/pandas/tests/frame/indexing/test_getitem.py
@@ -357,12 +357,18 @@ def test_getitem_empty_frame_with_boolean(self):
df2 = df[df > 0]
tm.assert_frame_equal(df, df2)
- def test_getitem_returns_view_when_column_is_unique_in_df(self):
+ def test_getitem_returns_view_when_column_is_unique_in_df(
+ self, using_copy_on_write
+ ):
# GH#45316
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
+ df_orig = df.copy()
view = df["b"]
view.loc[:] = 100
- expected = DataFrame([[1, 2, 100], [4, 5, 100]], columns=["a", "a", "b"])
+ if using_copy_on_write:
+ expected = df_orig
+ else:
+ expected = DataFrame([[1, 2, 100], [4, 5, 100]], columns=["a", "a", "b"])
tm.assert_frame_equal(df, expected)
def test_getitem_frozenset_unique_in_column(self):
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 9027ce8109810..6eecf4c18f182 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -269,7 +269,7 @@ def test_setattr_column(self):
df.foobar = 5
assert (df.foobar == 5).all()
- def test_setitem(self, float_frame):
+ def test_setitem(self, float_frame, using_copy_on_write):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
@@ -305,8 +305,12 @@ def test_setitem(self, float_frame):
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # With CoW, adding a new column doesn't raise a warning
smaller["col10"] = ["1", "2"]
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
@@ -536,22 +540,29 @@ def test_getitem_setitem_integer_slice_keyerrors(self):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
- def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
+ def test_fancy_getitem_slice_mixed(
+ self, float_frame, float_string_frame, using_copy_on_write
+ ):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
+ original = float_frame.copy()
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
- sliced.loc[:, "C"] = 4.0
+ if not using_copy_on_write:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ sliced.loc[:, "C"] = 4.0
- assert (float_frame["C"] == 4).all()
+ assert (float_frame["C"] == 4).all()
+ else:
+ sliced.loc[:, "C"] = 4.0
+ tm.assert_frame_equal(float_frame, original)
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
@@ -994,7 +1005,7 @@ def test_iloc_row(self):
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
- def test_iloc_row_slice_view(self, using_array_manager):
+ def test_iloc_row_slice_view(self, using_array_manager, using_copy_on_write):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
@@ -1004,14 +1015,17 @@ def test_iloc_row_slice_view(self, using_array_manager):
assert np.shares_memory(df[2], subset[2])
+ exp_col = original[2].copy()
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
subset.loc[:, 2] = 0.0
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ subset.loc[:, 2] = 0.0
- exp_col = original[2].copy()
- # TODO(ArrayManager) verify it is expected that the original didn't change
- if not using_array_manager:
- exp_col._values[4:8] = 0.0
+ # TODO(ArrayManager) verify it is expected that the original didn't change
+ if not using_array_manager:
+ exp_col._values[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
@@ -1036,14 +1050,13 @@ def test_iloc_col(self):
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
- def test_iloc_col_slice_view(self, using_array_manager):
+ def test_iloc_col_slice_view(self, using_array_manager, using_copy_on_write):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
- if not using_array_manager:
+ if not using_array_manager and not using_copy_on_write:
# verify slice is view
-
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
@@ -1053,7 +1066,9 @@ def test_iloc_col_slice_view(self, using_array_manager):
assert (df[8] == 0).all()
else:
- # TODO(ArrayManager) verify this is the desired behaviour
+ if using_copy_on_write:
+ # verify slice is view
+ assert np.shares_memory(df[8]._values, subset[8]._values)
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 6b19738becc8e..6d6ae9d646d4e 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -801,7 +801,7 @@ def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
class TestDataFrameSetItemWithExpansion:
- def test_setitem_listlike_views(self):
+ def test_setitem_listlike_views(self, using_copy_on_write):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
@@ -814,7 +814,10 @@ def test_setitem_listlike_views(self):
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
- expected = Series([100, 2, 3], name="a")
+ if using_copy_on_write:
+ expected = Series([1, 2, 3], name="a")
+ else:
+ expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
@@ -824,7 +827,7 @@ def test_setitem_string_column_numpy_dtype_raising(self):
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
- def test_setitem_empty_df_duplicate_columns(self):
+ def test_setitem_empty_df_duplicate_columns(self, using_copy_on_write):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
@@ -1134,7 +1137,9 @@ def test_setitem_always_copy(self, float_frame):
assert notna(s[5:10]).all()
@pytest.mark.parametrize("consolidate", [True, False])
- def test_setitem_partial_column_inplace(self, consolidate, using_array_manager):
+ def test_setitem_partial_column_inplace(
+ self, consolidate, using_array_manager, using_copy_on_write
+ ):
# This setting should be in-place, regardless of whether frame is
# single-block or multi-block
# GH#304 this used to be incorrectly not-inplace, in which case
@@ -1159,8 +1164,9 @@ def test_setitem_partial_column_inplace(self, consolidate, using_array_manager):
tm.assert_series_equal(df["z"], expected)
# check setting occurred in-place
- tm.assert_numpy_array_equal(zvals, expected.values)
- assert np.shares_memory(zvals, df["z"]._values)
+ if not using_copy_on_write:
+ tm.assert_numpy_array_equal(zvals, expected.values)
+ assert np.shares_memory(zvals, df["z"]._values)
def test_setitem_duplicate_columns_not_inplace(self):
# GH#39510
diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
index 898722d6d77ae..5951c1dd6e45e 100644
--- a/pandas/tests/frame/indexing/test_xs.py
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -111,13 +111,17 @@ def test_xs_keep_level(self):
result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False)
tm.assert_frame_equal(result, expected)
- def test_xs_view(self, using_array_manager):
+ def test_xs_view(self, using_array_manager, using_copy_on_write):
# in 0.14 this will return a view if possible a copy otherwise, but
# this is numpy dependent
dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5))
+ df_orig = dm.copy()
- if using_array_manager:
+ if using_copy_on_write:
+ dm.xs(2)[:] = 20
+ tm.assert_frame_equal(dm, df_orig)
+ elif using_array_manager:
# INFO(ArrayManager) with ArrayManager getting a row as a view is
# not possible
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
@@ -176,27 +180,41 @@ def test_xs_level_eq_2(self):
result = df.xs("c", level=2)
tm.assert_frame_equal(result, expected)
- def test_xs_setting_with_copy_error(self, multiindex_dataframe_random_data):
+ def test_xs_setting_with_copy_error(
+ self, multiindex_dataframe_random_data, using_copy_on_write
+ ):
# this is a copy in 0.14
df = multiindex_dataframe_random_data
+ df_orig = df.copy()
result = df.xs("two", level="second")
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
result[:] = 10
+ else:
+ # setting this will give a SettingWithCopyError
+ # as we are trying to write a view
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ result[:] = 10
+ tm.assert_frame_equal(df, df_orig)
- def test_xs_setting_with_copy_error_multiple(self, four_level_index_dataframe):
+ def test_xs_setting_with_copy_error_multiple(
+ self, four_level_index_dataframe, using_copy_on_write
+ ):
# this is a copy in 0.14
df = four_level_index_dataframe
+ df_orig = df.copy()
result = df.xs(("a", 4), level=["one", "four"])
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
result[:] = 10
+ else:
+ # setting this will give a SettingWithCopyError
+ # as we are trying to write a view
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ result[:] = 10
+ tm.assert_frame_equal(df, df_orig)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(self, key, level, multiindex_dataframe_random_data):
@@ -359,15 +377,20 @@ def test_xs_droplevel_false(self):
expected = DataFrame({"a": [1]})
tm.assert_frame_equal(result, expected)
- def test_xs_droplevel_false_view(self, using_array_manager):
+ def test_xs_droplevel_false_view(self, using_array_manager, using_copy_on_write):
# GH#37832
df = DataFrame([[1, 2, 3]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
# check that result still views the same data as df
assert np.shares_memory(result.iloc[:, 0]._values, df.iloc[:, 0]._values)
- # modifying original df also modifies result when having a single block
+
df.iloc[0, 0] = 2
- expected = DataFrame({"a": [2]})
+ if using_copy_on_write:
+ # with copy on write the subset is never modified
+ expected = DataFrame({"a": [1]})
+ else:
+ # modifying original df also modifies result when having a single block
+ expected = DataFrame({"a": [2]})
tm.assert_frame_equal(result, expected)
# with mixed dataframe, modifying the parent doesn't modify result
@@ -375,7 +398,10 @@ def test_xs_droplevel_false_view(self, using_array_manager):
df = DataFrame([[1, 2.5, "a"]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
df.iloc[0, 0] = 2
- if using_array_manager:
+ if using_copy_on_write:
+ # with copy on write the subset is never modified
+ expected = DataFrame({"a": [1]})
+ elif using_array_manager:
# Here the behavior is consistent
expected = DataFrame({"a": [2]})
else:
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index d86c1b2aedcac..20e59ed72666a 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -20,13 +20,16 @@
class TestFillNA:
@td.skip_array_manager_not_yet_implemented
- def test_fillna_on_column_view(self):
+ def test_fillna_on_column_view(self, using_copy_on_write):
# GH#46149 avoid unnecessary copies
arr = np.full((40, 50), np.nan)
df = DataFrame(arr)
df[0].fillna(-1, inplace=True)
- assert (arr[:, 0] == -1).all()
+ if using_copy_on_write:
+ assert np.isnan(arr[:, 0]).all()
+ else:
+ assert (arr[:, 0] == -1).all()
# i.e. we didn't create a new 49-column block
assert len(df._mgr.arrays) == 1
@@ -676,14 +679,18 @@ def test_fillna_inplace_with_columns_limit_and_value(self):
@td.skip_array_manager_invalid_test
@pytest.mark.parametrize("val", [-1, {"x": -1, "y": -1}])
- def test_inplace_dict_update_view(self, val):
+ def test_inplace_dict_update_view(self, val, using_copy_on_write):
# GH#47188
df = DataFrame({"x": [np.nan, 2], "y": [np.nan, 2]})
+ df_orig = df.copy()
result_view = df[:]
df.fillna(val, inplace=True)
expected = DataFrame({"x": [-1, 2.0], "y": [-1.0, 2]})
tm.assert_frame_equal(df, expected)
- tm.assert_frame_equal(result_view, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(result_view, df_orig)
+ else:
+ tm.assert_frame_equal(result_view, expected)
def test_single_block_df_with_horizontal_axis(self):
# GH 47713
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index 98f9d2670074d..7d6cf43c530a7 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -303,7 +303,10 @@ def test_interp_raise_on_all_object_dtype(self):
with pytest.raises(TypeError, match=msg):
df.interpolate()
- def test_interp_inplace(self):
+ def test_interp_inplace(self, using_copy_on_write):
+ # TODO(CoW) inplace keyword (it is still mutating the parent)
+ if using_copy_on_write:
+ pytest.skip("CoW: inplace keyword not yet handled")
df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]})
expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
result = df.copy()
diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py
index b1594660caec6..f4443953a0d52 100644
--- a/pandas/tests/frame/methods/test_rename.py
+++ b/pandas/tests/frame/methods/test_rename.py
@@ -171,16 +171,21 @@ def test_rename_multiindex(self):
tm.assert_index_equal(renamed.index, new_index)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem copy/view
- def test_rename_nocopy(self, float_frame):
+ def test_rename_nocopy(self, float_frame, using_copy_on_write):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
assert np.shares_memory(renamed["foo"]._values, float_frame["C"]._values)
- with tm.assert_produces_warning(None):
+ # TODO(CoW) this also shouldn't warn in case of CoW, but the heuristic
+ # checking if the array shares memory doesn't work if CoW happened
+ with tm.assert_produces_warning(FutureWarning if using_copy_on_write else None):
# This loc setitem already happens inplace, so no warning
# that this will change in the future
renamed.loc[:, "foo"] = 1.0
- assert (float_frame["C"] == 1.0).all()
+ if using_copy_on_write:
+ assert not (float_frame["C"] == 1.0).all()
+ else:
+ assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
diff --git a/pandas/tests/frame/methods/test_to_dict_of_blocks.py b/pandas/tests/frame/methods/test_to_dict_of_blocks.py
index c81bed9d93cc4..eb9b78610a112 100644
--- a/pandas/tests/frame/methods/test_to_dict_of_blocks.py
+++ b/pandas/tests/frame/methods/test_to_dict_of_blocks.py
@@ -27,7 +27,7 @@ def test_copy_blocks(self, float_frame):
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
- def test_no_copy_blocks(self, float_frame):
+ def test_no_copy_blocks(self, float_frame, using_copy_on_write):
# GH#9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
@@ -38,8 +38,11 @@ def test_no_copy_blocks(self, float_frame):
if column in _df:
_df.loc[:, column] = _df[column] + 1
- # make sure we did change the original DataFrame
- assert _df[column].equals(df[column])
+ if not using_copy_on_write:
+ # make sure we did change the original DataFrame
+ assert _df[column].equals(df[column])
+ else:
+ assert not _df[column].equals(df[column])
def test_to_dict_of_blocks_item_cache():
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
index d3257ac09a0ab..a35530100a425 100644
--- a/pandas/tests/frame/methods/test_update.py
+++ b/pandas/tests/frame/methods/test_update.py
@@ -140,22 +140,29 @@ def test_update_datetime_tz(self):
expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
tm.assert_frame_equal(result, expected)
- def test_update_with_different_dtype(self):
+ def test_update_with_different_dtype(self, using_copy_on_write):
# GH#3217
df = DataFrame({"a": [1, 3], "b": [np.nan, 2]})
df["c"] = np.nan
- df["c"].update(Series(["foo"], index=[0]))
+ if using_copy_on_write:
+ df.update({"c": Series(["foo"], index=[0])})
+ else:
+ df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame({"a": [1, 3], "b": [np.nan, 2], "c": ["foo", np.nan]})
tm.assert_frame_equal(df, expected)
@td.skip_array_manager_invalid_test
- def test_update_modify_view(self):
+ def test_update_modify_view(self, using_copy_on_write):
# GH#47188
df = DataFrame({"A": ["1", np.nan], "B": ["100", np.nan]})
df2 = DataFrame({"A": ["a", "x"], "B": ["100", "200"]})
+ df2_orig = df2.copy()
result_view = df2[:]
df2.update(df)
expected = DataFrame({"A": ["1", "x"], "B": ["100", "200"]})
tm.assert_frame_equal(df2, expected)
- tm.assert_frame_equal(result_view, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(result_view, df2_orig)
+ else:
+ tm.assert_frame_equal(result_view, expected)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index af092d433a846..cb97e2bfb6202 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -323,7 +323,9 @@ def test_attrs(self):
assert result.attrs == {"version": 1}
@pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
- def test_set_flags(self, allows_duplicate_labels, frame_or_series):
+ def test_set_flags(
+ self, allows_duplicate_labels, frame_or_series, using_copy_on_write
+ ):
obj = DataFrame({"A": [1, 2]})
key = (0, 0)
if frame_or_series is Series:
@@ -345,15 +347,25 @@ def test_set_flags(self, allows_duplicate_labels, frame_or_series):
assert obj.flags.allows_duplicate_labels is True
# But we didn't copy data
+ if frame_or_series is Series:
+ assert np.may_share_memory(obj.values, result.values)
+ else:
+ assert np.may_share_memory(obj["A"].values, result["A"].values)
+
result.iloc[key] = 0
- assert obj.iloc[key] == 0
+ if using_copy_on_write:
+ assert obj.iloc[key] == 1
+ else:
+ assert obj.iloc[key] == 0
+ # set back to 1 for test below
+ result.iloc[key] = 1
# Now we do copy.
result = obj.set_flags(
copy=True, allows_duplicate_labels=allows_duplicate_labels
)
result.iloc[key] = 10
- assert obj.iloc[key] == 0
+ assert obj.iloc[key] == 1
def test_constructor_expanddim(self):
# GH#33628 accessing _constructor_expanddim should not raise NotImplementedError
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 8aa0e980b01c4..46c712cf4d458 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -334,7 +334,7 @@ def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
- def test_stale_cached_series_bug_473(self):
+ def test_stale_cached_series_bug_473(self, using_copy_on_write):
# this is chained, but ok
with option_context("chained_assignment", None):
@@ -349,9 +349,12 @@ def test_stale_cached_series_bug_473(self):
repr(Y)
result = Y.sum() # noqa
exp = Y["g"].sum() # noqa
- assert pd.isna(Y["g"]["c"])
+ if using_copy_on_write:
+ assert not pd.isna(Y["g"]["c"])
+ else:
+ assert pd.isna(Y["g"]["c"])
- def test_strange_column_corruption_issue(self):
+ def test_strange_column_corruption_issue(self, using_copy_on_write):
# TODO(wesm): Unclear how exactly this is related to internal matters
df = DataFrame(index=[0, 1])
df[0] = np.nan
@@ -363,7 +366,10 @@ def test_strange_column_corruption_issue(self):
if col not in wasCol:
wasCol[col] = 1
df[col] = np.nan
- df[col][dt] = i
+ if using_copy_on_write:
+ df.loc[dt, col] = i
+ else:
+ df[col][dt] = i
myid = 100
@@ -396,7 +402,7 @@ def test_add_column_with_pandas_array(self):
tm.assert_frame_equal(df, df2)
-def test_update_inplace_sets_valid_block_values():
+def test_update_inplace_sets_valid_block_values(using_copy_on_write):
# https://github.com/pandas-dev/pandas/issues/33457
df = DataFrame({"a": Series([1, 2, None], dtype="category")})
@@ -406,8 +412,9 @@ def test_update_inplace_sets_valid_block_values():
# check we haven't put a Series into any block.values
assert isinstance(df._mgr.blocks[0].values, Categorical)
- # smoketest for OP bug from GH#35731
- assert df.isnull().sum().sum() == 0
+ if not using_copy_on_write:
+ # smoketest for OP bug from GH#35731
+ assert df.isnull().sum().sum() == 0
def test_nonconsolidated_item_cache_take():
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 4c2e9b8530e81..6ad2f35bd2a6a 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -271,15 +271,22 @@ def test_constructor_dtype_copy(self):
new_df["col1"] = 200.0
assert orig_df["col1"][0] == 1.0
- def test_constructor_dtype_nocast_view_dataframe(self):
+ def test_constructor_dtype_nocast_view_dataframe(self, using_copy_on_write):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
- should_be_view[0][0] = 99
- assert df.values[0, 0] == 99
+ if using_copy_on_write:
+ # INFO(CoW) doesn't mutate original
+ should_be_view.iloc[0, 0] = 99
+ assert df.values[0, 0] == 1
+ else:
+ should_be_view[0][0] = 99
+ assert df.values[0, 0] == 99
- def test_constructor_dtype_nocast_view_2d_array(self, using_array_manager):
+ def test_constructor_dtype_nocast_view_2d_array(
+ self, using_array_manager, using_copy_on_write
+ ):
df = DataFrame([[1, 2], [3, 4]], dtype="int64")
- if not using_array_manager:
+ if not using_array_manager and not using_copy_on_write:
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
@@ -2523,7 +2530,13 @@ def test_constructor_list_str_na(self, string_dtype):
@pytest.mark.parametrize("copy", [False, True])
def test_dict_nocopy(
- self, request, copy, any_numeric_ea_dtype, any_numpy_dtype, using_array_manager
+ self,
+ request,
+ copy,
+ any_numeric_ea_dtype,
+ any_numpy_dtype,
+ using_array_manager,
+ using_copy_on_write,
):
if (
using_array_manager
@@ -2597,7 +2610,7 @@ def check_views(c_only: bool = False):
with tm.assert_produces_warning(FutureWarning, match="will attempt to set"):
df.iloc[:, 2] = pd.array([45, 46], dtype=c.dtype)
assert df.dtypes.iloc[2] == c.dtype
- if not copy:
+ if not copy and not using_copy_on_write:
check_views(True)
if copy:
@@ -2609,7 +2622,7 @@ def check_views(c_only: bool = False):
assert b[0] == b.dtype.type(3)
# FIXME(GH#35417): enable after GH#35417
assert c[0] == c_orig[0] # i.e. df.iloc[0, 2]=45 did *not* update c
- else:
+ elif not using_copy_on_write:
# TODO: we can call check_views if we stop consolidating
# in setitem_with_indexer
assert c[0] == 45 # i.e. df.iloc[0, 2]=45 *did* update c
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 72e7e458b4e1f..2cf1cf0f15652 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -12,16 +12,20 @@
class TestPeriodIndex:
- def test_getitem_periodindex_duplicates_string_slice(self):
+ def test_getitem_periodindex_duplicates_string_slice(self, using_copy_on_write):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
ts = Series(np.random.randn(len(idx)), index=idx)
+ original = ts.copy()
result = ts["2007"]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
- assert (ts[1:3] == 1).all()
+ if using_copy_on_write:
+ tm.assert_series_equal(ts, original)
+ else:
+ assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="A-JUN")
diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
index 479cd9952f75b..2efb288a73f8d 100644
--- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
@@ -12,7 +12,7 @@
import pandas._testing as tm
-def test_detect_chained_assignment():
+def test_detect_chained_assignment(using_copy_on_write):
# Inplace ops, originally from:
# https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
a = [12, 23]
@@ -29,17 +29,21 @@ def test_detect_chained_assignment():
multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
zed = DataFrame(events, index=["a", "b"], columns=multiind)
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
zed["eyes"]["right"].fillna(value=555, inplace=True)
+ else:
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ zed["eyes"]["right"].fillna(value=555, inplace=True)
@td.skip_array_manager_invalid_test # with ArrayManager df.loc[0] is not a view
-def test_cache_updating():
+def test_cache_updating(using_copy_on_write):
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=["x", "y", "z"])
+ df_original = df.copy()
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
@@ -48,7 +52,10 @@ def test_cache_updating():
# but actually works, since everything is a view
df.loc[0]["z"].iloc[0] = 1.0
result = df.loc[(0, 0), "z"]
- assert result == 1
+ if using_copy_on_write:
+ assert result == df_original.loc[0, "z"]
+ else:
+ assert result == 1
# correct setting
df.loc[(0, 0), "z"] = 2
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index a57b363c0a448..cface630c6647 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -122,7 +122,9 @@ def test_getitem_partial_column_select(self):
# TODO(ArrayManager) rewrite test to not use .values
# exp.loc[2000, 4].values[:] select multiple columns -> .values is not a view
@td.skip_array_manager_invalid_test
- def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
+ def test_partial_set(
+ self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write
+ ):
# GH #397
ymd = multiindex_year_month_day_dataframe_random_data
df = ymd.copy()
@@ -132,7 +134,8 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
tm.assert_frame_equal(df, exp)
df["A"].loc[2000, 4] = 1
- exp["A"].loc[2000, 4].values[:] = 1
+ if not using_copy_on_write:
+ exp["A"].loc[2000, 4].values[:] = 1
tm.assert_frame_equal(df, exp)
df.loc[2000] = 5
@@ -141,7 +144,10 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
# this works...for now
df["A"].iloc[14] = 5
- assert df["A"].iloc[14] == 5
+ if using_copy_on_write:
+ df["A"].iloc[14] == exp["A"].iloc[14]
+ else:
+ assert df["A"].iloc[14] == 5
@pytest.mark.parametrize("dtype", [int, float])
def test_getitem_intkey_leading_level(
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index 20569061cfa4c..ac10a6d82dc89 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -196,7 +196,7 @@ def test_multiindex_assignment(self):
df.loc[4, "d"] = arr
tm.assert_series_equal(df.loc[4, "d"], Series(arr, index=[8, 10], name="d"))
- def test_multiindex_assignment_single_dtype(self, using_array_manager):
+ def test_multiindex_assignment_single_dtype(self, using_copy_on_write):
# GH3777 part 2b
# single dtype
arr = np.array([0.0, 1.0])
@@ -216,7 +216,8 @@ def test_multiindex_assignment_single_dtype(self, using_array_manager):
tm.assert_series_equal(result, exp)
# extra check for inplace-ness
- tm.assert_numpy_array_equal(view, exp.values)
+ if not using_copy_on_write:
+ tm.assert_numpy_array_equal(view, exp.values)
# arr + 0.5 cannot be cast losslessly to int, so we upcast
df.loc[4, "c"] = arr + 0.5
@@ -405,16 +406,23 @@ def test_setitem_change_dtype(self, multiindex_dataframe_random_data):
reindexed = dft.reindex(columns=[("foo", "two")])
tm.assert_series_equal(reindexed["foo", "two"], s > s.median())
- def test_set_column_scalar_with_loc(self, multiindex_dataframe_random_data):
+ def test_set_column_scalar_with_loc(
+ self, multiindex_dataframe_random_data, using_copy_on_write
+ ):
frame = multiindex_dataframe_random_data
subset = frame.index[[1, 4, 5]]
frame.loc[subset] = 99
assert (frame.loc[subset].values == 99).all()
+ frame_original = frame.copy()
col = frame["B"]
col[subset] = 97
- assert (frame.loc[subset, "B"] == 97).all()
+ if using_copy_on_write:
+ # chained setitem doesn't work with CoW
+ tm.assert_frame_equal(frame, frame_original)
+ else:
+ assert (frame.loc[subset, "B"] == 97).all()
def test_nonunique_assignment_1750(self):
df = DataFrame(
@@ -487,21 +495,32 @@ def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
assert (df["foo"].values == 0).all()
-def test_frame_setitem_copy_raises(multiindex_dataframe_random_data):
+def test_frame_setitem_copy_raises(
+ multiindex_dataframe_random_data, using_copy_on_write
+):
# will raise/warn as its chained assignment
df = multiindex_dataframe_random_data.T
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) it would be nice if this could still warn/raise
df["foo"]["one"] = 2
+ else:
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df["foo"]["one"] = 2
-def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data):
+def test_frame_setitem_copy_no_write(
+ multiindex_dataframe_random_data, using_copy_on_write
+):
frame = multiindex_dataframe_random_data.T
expected = frame
df = frame.copy()
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
df["foo"]["one"] = 2
+ else:
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df["foo"]["one"] = 2
result = df
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index adc001695579c..81914e1b8052f 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -32,7 +32,7 @@ def random_text(nobs=100):
class TestCaching:
- def test_slice_consolidate_invalidate_item_cache(self):
+ def test_slice_consolidate_invalidate_item_cache(self, using_copy_on_write):
# this is chained assignment, but will 'work'
with option_context("chained_assignment", None):
@@ -52,7 +52,11 @@ def test_slice_consolidate_invalidate_item_cache(self):
# Assignment to wrong series
df["bb"].iloc[0] = 0.17
df._clear_item_cache()
- tm.assert_almost_equal(df["bb"][0], 0.17)
+ if not using_copy_on_write:
+ tm.assert_almost_equal(df["bb"][0], 0.17)
+ else:
+ # with ArrayManager, parent is not mutated with chained assignment
+ tm.assert_almost_equal(df["bb"][0], 2.2)
@pytest.mark.parametrize("do_ref", [True, False])
def test_setitem_cache_updating(self, do_ref):
@@ -71,7 +75,7 @@ def test_setitem_cache_updating(self, do_ref):
assert df.loc[0, "c"] == 0.0
assert df.loc[7, "c"] == 1.0
- def test_setitem_cache_updating_slices(self):
+ def test_setitem_cache_updating_slices(self, using_copy_on_write):
# GH 7084
# not updating cache on series setting with slices
expected = DataFrame(
@@ -92,12 +96,17 @@ def test_setitem_cache_updating_slices(self):
# try via a chain indexing
# this actually works
out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
+ out_original = out.copy()
for ix, row in df.iterrows():
v = out[row["C"]][six:eix] + row["D"]
out[row["C"]][six:eix] = v
- tm.assert_frame_equal(out, expected)
- tm.assert_series_equal(out["A"], expected["A"])
+ if not using_copy_on_write:
+ tm.assert_frame_equal(out, expected)
+ tm.assert_series_equal(out["A"], expected["A"])
+ else:
+ tm.assert_frame_equal(out, out_original)
+ tm.assert_series_equal(out["A"], out_original["A"])
out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
for ix, row in df.iterrows():
@@ -123,7 +132,7 @@ def test_altering_series_clears_parent_cache(self):
class TestChaining:
- def test_setitem_chained_setfault(self):
+ def test_setitem_chained_setfault(self, using_copy_on_write):
# GH6026
data = ["right", "left", "left", "left", "right", "left", "timeout"]
@@ -132,24 +141,38 @@ def test_setitem_chained_setfault(self):
df = DataFrame({"response": np.array(data)})
mask = df.response == "timeout"
df.response[mask] = "none"
- tm.assert_frame_equal(df, DataFrame({"response": mdata}))
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, DataFrame({"response": data}))
+ else:
+ tm.assert_frame_equal(df, DataFrame({"response": mdata}))
recarray = np.rec.fromarrays([data], names=["response"])
df = DataFrame(recarray)
mask = df.response == "timeout"
df.response[mask] = "none"
- tm.assert_frame_equal(df, DataFrame({"response": mdata}))
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, DataFrame({"response": data}))
+ else:
+ tm.assert_frame_equal(df, DataFrame({"response": mdata}))
df = DataFrame({"response": data, "response1": data})
+ df_original = df.copy()
mask = df.response == "timeout"
df.response[mask] = "none"
- tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data}))
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_original)
+ else:
+ tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data}))
# GH 6056
expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]})
df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
df["A"].iloc[0] = np.nan
result = df.head()
+ if using_copy_on_write:
+ expected = DataFrame({"A": ["foo", "bar", "bah", "foo", "bar"]})
+ else:
+ expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]})
tm.assert_frame_equal(result, expected)
df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
@@ -158,7 +181,7 @@ def test_setitem_chained_setfault(self):
tm.assert_frame_equal(result, expected)
@pytest.mark.arm_slow
- def test_detect_chained_assignment(self):
+ def test_detect_chained_assignment(self, using_copy_on_write):
with option_context("chained_assignment", "raise"):
# work with the chain
@@ -166,14 +189,20 @@ def test_detect_chained_assignment(self):
df = DataFrame(
np.arange(4).reshape(2, 2), columns=list("AB"), dtype="int64"
)
+ df_original = df.copy()
assert df._is_copy is None
df["A"][0] = -5
df["A"][1] = -6
- tm.assert_frame_equal(df, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_original)
+ else:
+ tm.assert_frame_equal(df, expected)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_raises(self, using_array_manager):
+ def test_detect_chained_assignment_raises(
+ self, using_array_manager, using_copy_on_write
+ ):
# test with the chaining
df = DataFrame(
@@ -182,9 +211,14 @@ def test_detect_chained_assignment_raises(self, using_array_manager):
"B": np.array(np.arange(2, 4), dtype=np.float64),
}
)
+ df_original = df.copy()
assert df._is_copy is None
- if not using_array_manager:
+ if using_copy_on_write:
+ df["A"][0] = -5
+ df["A"][1] = -6
+ tm.assert_frame_equal(df, df_original)
+ elif not using_array_manager:
with pytest.raises(SettingWithCopyError, match=msg):
df["A"][0] = -5
@@ -192,7 +226,6 @@ def test_detect_chained_assignment_raises(self, using_array_manager):
df["A"][1] = np.nan
assert df["A"]._is_copy is None
-
else:
# INFO(ArrayManager) for ArrayManager it doesn't matter that it's
# a mixed dataframe
@@ -203,7 +236,7 @@ def test_detect_chained_assignment_raises(self, using_array_manager):
tm.assert_frame_equal(df, expected)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_fails(self):
+ def test_detect_chained_assignment_fails(self, using_copy_on_write):
# Using a copy (the chain), fails
df = DataFrame(
@@ -213,11 +246,15 @@ def test_detect_chained_assignment_fails(self):
}
)
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
df.loc[0]["A"] = -5
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[0]["A"] = -5
@pytest.mark.arm_slow
- def test_detect_chained_assignment_doc_example(self):
+ def test_detect_chained_assignment_doc_example(self, using_copy_on_write):
# Doc example
df = DataFrame(
@@ -228,30 +265,43 @@ def test_detect_chained_assignment_doc_example(self):
)
assert df._is_copy is None
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
indexer = df.a.str.startswith("o")
df[indexer]["c"] = 42
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ indexer = df.a.str.startswith("o")
+ df[indexer]["c"] = 42
@pytest.mark.arm_slow
- def test_detect_chained_assignment_object_dtype(self, using_array_manager):
+ def test_detect_chained_assignment_object_dtype(
+ self, using_array_manager, using_copy_on_write
+ ):
expected = DataFrame({"A": [111, "bbb", "ccc"], "B": [1, 2, 3]})
df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
+ df_original = df.copy()
- with pytest.raises(SettingWithCopyError, match=msg):
- df.loc[0]["A"] = 111
+ if not using_copy_on_write:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[0]["A"] = 111
- if not using_array_manager:
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
+ df["A"][0] = 111
+ tm.assert_frame_equal(df, df_original)
+ elif not using_array_manager:
with pytest.raises(SettingWithCopyError, match=msg):
df["A"][0] = 111
df.loc[0, "A"] = 111
+ tm.assert_frame_equal(df, expected)
else:
# INFO(ArrayManager) for ArrayManager it doesn't matter that it's
# a mixed dataframe
df["A"][0] = 111
-
- tm.assert_frame_equal(df, expected)
+ tm.assert_frame_equal(df, expected)
@pytest.mark.arm_slow
def test_detect_chained_assignment_is_copy_pickle(self):
@@ -299,8 +349,9 @@ def test_detect_chained_assignment_implicit_take(self):
df["letters"] = df["letters"].apply(str.lower)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_implicit_take2(self):
-
+ def test_detect_chained_assignment_implicit_take2(self, using_copy_on_write):
+ if using_copy_on_write:
+ pytest.skip("_is_copy is not always set for CoW")
# Implicitly take 2
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
@@ -356,18 +407,26 @@ def test_detect_chained_assignment_false_positives(self):
str(df)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_undefined_column(self):
+ def test_detect_chained_assignment_undefined_column(self, using_copy_on_write):
# from SO:
# https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc
df = DataFrame(np.arange(0, 9), columns=["count"])
df["group"] = "b"
+ df_original = df.copy()
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
df.iloc[0:5]["group"] = "a"
+ tm.assert_frame_equal(df, df_original)
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.iloc[0:5]["group"] = "a"
@pytest.mark.arm_slow
- def test_detect_chained_assignment_changing_dtype(self, using_array_manager):
+ def test_detect_chained_assignment_changing_dtype(
+ self, using_array_manager, using_copy_on_write
+ ):
# Mixed type setting but same dtype & changing dtype
df = DataFrame(
@@ -378,32 +437,45 @@ def test_detect_chained_assignment_changing_dtype(self, using_array_manager):
"D": ["a", "b", "c", "d", "e"],
}
)
+ df_original = df.copy()
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
df.loc[2]["D"] = "foo"
-
- with pytest.raises(SettingWithCopyError, match=msg):
df.loc[2]["C"] = "foo"
+ df["C"][2] = "foo"
+ tm.assert_frame_equal(df, df_original)
+
+ if not using_copy_on_write:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[2]["D"] = "foo"
- if not using_array_manager:
with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[2]["C"] = "foo"
+
+ if not using_array_manager:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df["C"][2] = "foo"
+ else:
+ # INFO(ArrayManager) for ArrayManager it doesn't matter if it's
+ # changing the dtype or not
df["C"][2] = "foo"
- else:
- # INFO(ArrayManager) for ArrayManager it doesn't matter if it's
- # changing the dtype or not
- df["C"][2] = "foo"
- assert df.loc[2, "C"] == "foo"
+ assert df.loc[2, "C"] == "foo"
- def test_setting_with_copy_bug(self):
+ def test_setting_with_copy_bug(self, using_copy_on_write):
# operating on a copy
df = DataFrame(
{"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
)
+ df_original = df.copy()
mask = pd.isna(df.c)
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
df[["c"]][mask] = df[["b"]][mask]
+ tm.assert_frame_equal(df, df_original)
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df[["c"]][mask] = df[["b"]][mask]
def test_setting_with_copy_bug_no_warning(self):
# invalid warning as we are returning a new object
@@ -414,8 +486,12 @@ def test_setting_with_copy_bug_no_warning(self):
# this should not raise
df2["y"] = ["g", "h", "i"]
- def test_detect_chained_assignment_warnings_errors(self):
+ def test_detect_chained_assignment_warnings_errors(self, using_copy_on_write):
df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
+ if using_copy_on_write:
+ df.loc[0]["A"] = 111
+ return
+
with option_context("chained_assignment", "warn"):
with tm.assert_produces_warning(SettingWithCopyWarning):
df.loc[0]["A"] = 111
@@ -425,14 +501,23 @@ def test_detect_chained_assignment_warnings_errors(self):
df.loc[0]["A"] = 111
@pytest.mark.parametrize("rhs", [3, DataFrame({0: [1, 2, 3, 4]})])
- def test_detect_chained_assignment_warning_stacklevel(self, rhs):
+ def test_detect_chained_assignment_warning_stacklevel(
+ self, rhs, using_copy_on_write
+ ):
# GH#42570
df = DataFrame(np.arange(25).reshape(5, 5))
+ df_original = df.copy()
chained = df.loc[:3]
with option_context("chained_assignment", "warn"):
- with tm.assert_produces_warning(SettingWithCopyWarning) as t:
- chained[2] = rhs
- assert t[0].filename == __file__
+ if not using_copy_on_write:
+ with tm.assert_produces_warning(SettingWithCopyWarning) as t:
+ chained[2] = rhs
+ assert t[0].filename == __file__
+ else:
+ # INFO(CoW) no warning, and original dataframe not changed
+ with tm.assert_produces_warning(None):
+ chained[2] = rhs
+ tm.assert_frame_equal(df, df_original)
# TODO(ArrayManager) fast_xs with array-like scalars is not yet working
@td.skip_array_manager_not_yet_implemented
@@ -483,7 +568,7 @@ def test_cache_updating2(self):
expected = Series([0, 0, 0, 2, 0], name="f")
tm.assert_series_equal(df.f, expected)
- def test_iloc_setitem_chained_assignment(self):
+ def test_iloc_setitem_chained_assignment(self, using_copy_on_write):
# GH#3970
with option_context("chained_assignment", None):
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
@@ -497,7 +582,10 @@ def test_iloc_setitem_chained_assignment(self):
df.iloc[ck]
df["bb"].iloc[0] = 0.15
- assert df["bb"].iloc[0] == 0.15
+ if not using_copy_on_write:
+ assert df["bb"].iloc[0] == 0.15
+ else:
+ assert df["bb"].iloc[0] == 2.2
def test_getitem_loc_assignment_slice_state(self):
# GH 13569
diff --git a/pandas/tests/indexing/test_iat.py b/pandas/tests/indexing/test_iat.py
index 44bd51ee1b7d1..916303884df88 100644
--- a/pandas/tests/indexing/test_iat.py
+++ b/pandas/tests/indexing/test_iat.py
@@ -31,7 +31,7 @@ def test_iat_getitem_series_with_period_index():
assert expected == result
-def test_iat_setitem_item_cache_cleared(indexer_ial):
+def test_iat_setitem_item_cache_cleared(indexer_ial, using_copy_on_write):
# GH#45684
data = {"x": np.arange(8, dtype=np.int64), "y": np.int64(0)}
df = DataFrame(data).copy()
@@ -44,5 +44,6 @@ def test_iat_setitem_item_cache_cleared(indexer_ial):
indexer_ial(df)[7, 1] = 1234
assert df.iat[7, 1] == 1234
- assert ser.iloc[-1] == 1234
+ if not using_copy_on_write:
+ assert ser.iloc[-1] == 1234
assert df.iloc[-1, -1] == 1234
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index fdf741040407f..8cc6b6e73aaea 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -123,7 +123,7 @@ def test_iloc_setitem_ea_inplace(self, frame_or_series, box):
if frame_or_series is Series:
values = obj.values
else:
- values = obj[0].values
+ values = obj._mgr.arrays[0]
if frame_or_series is Series:
obj.iloc[:2] = box(arr[2:])
@@ -843,7 +843,9 @@ def test_iloc_empty_list_indexer_is_ok(self):
df.iloc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
- def test_identity_slice_returns_new_object(self, using_array_manager, request):
+ def test_identity_slice_returns_new_object(
+ self, using_array_manager, using_copy_on_write, request
+ ):
# GH13873
if using_array_manager:
mark = pytest.mark.xfail(
@@ -859,8 +861,12 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
assert np.shares_memory(original_df["a"], sliced_df["a"])
# Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
+ # depending on CoW
original_df.loc[:, "a"] = [4, 4, 4]
- assert (sliced_df["a"] == 4).all()
+ if using_copy_on_write:
+ assert (sliced_df["a"] == [1, 2, 3]).all()
+ else:
+ assert (sliced_df["a"] == 4).all()
original_series = Series([1, 2, 3, 4, 5, 6])
sliced_series = original_series.iloc[:]
@@ -868,7 +874,11 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
# should also be a shallow copy
original_series[:3] = [7, 8, 9]
- assert all(sliced_series[:3] == [7, 8, 9])
+ if using_copy_on_write:
+ # shallow copy not updated (CoW)
+ assert all(sliced_series[:3] == [1, 2, 3])
+ else:
+ assert all(sliced_series[:3] == [7, 8, 9])
def test_indexing_zerodim_np_array(self):
# GH24919
@@ -884,9 +894,10 @@ def test_series_indexing_zerodim_np_array(self):
assert result == 1
@td.skip_array_manager_not_yet_implemented
- def test_iloc_setitem_categorical_updates_inplace(self):
+ def test_iloc_setitem_categorical_updates_inplace(self, using_copy_on_write):
# Mixed dtype ensures we go through take_split_path in setitem_with_indexer
cat = Categorical(["A", "B", "C"])
+ cat_original = cat.copy()
df = DataFrame({1: cat, 2: [1, 2, 3]}, copy=False)
assert tm.shares_memory(df[1], cat)
@@ -895,9 +906,13 @@ def test_iloc_setitem_categorical_updates_inplace(self):
msg = "will attempt to set the values inplace instead"
with tm.assert_produces_warning(FutureWarning, match=msg):
df.iloc[:, 0] = cat[::-1]
- assert tm.shares_memory(df[1], cat)
- expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"])
+ if not using_copy_on_write:
+ assert tm.shares_memory(df[1], cat)
+ expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"])
+ else:
+ expected = cat_original
+
tm.assert_categorical_equal(cat, expected)
def test_iloc_with_boolean_operation(self):
@@ -1395,8 +1410,9 @@ def test_frame_iloc_setitem_callable(self):
class TestILocSeries:
- def test_iloc(self):
+ def test_iloc(self, using_copy_on_write):
ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))
+ ser_original = ser.copy()
for i in range(len(ser)):
result = ser.iloc[i]
@@ -1412,7 +1428,10 @@ def test_iloc(self):
with tm.assert_produces_warning(None):
# GH#45324 make sure we aren't giving a spurious FutureWarning
result[:] = 0
- assert (ser.iloc[1:3] == 0).all()
+ if using_copy_on_write:
+ tm.assert_series_equal(ser, ser_original)
+ else:
+ assert (ser.iloc[1:3] == 0).all()
# list of integers
result = ser.iloc[[0, 2, 3, 4, 5]]
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index a7c03c672be58..b13177ad940eb 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1066,7 +1066,9 @@ def test_loc_empty_list_indexer_is_ok(self):
df.loc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
- def test_identity_slice_returns_new_object(self, using_array_manager, request):
+ def test_identity_slice_returns_new_object(
+ self, using_array_manager, request, using_copy_on_write
+ ):
# GH13873
if using_array_manager:
mark = pytest.mark.xfail(
@@ -1083,8 +1085,12 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
assert np.shares_memory(original_df["a"]._values, sliced_df["a"]._values)
# Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
+ # depending on CoW
original_df.loc[:, "a"] = [4, 4, 4]
- assert (sliced_df["a"] == 4).all()
+ if using_copy_on_write:
+ assert (sliced_df["a"] == [1, 2, 3]).all()
+ else:
+ assert (sliced_df["a"] == 4).all()
# These should not return copies
assert original_df is original_df.loc[:, :]
@@ -1098,7 +1104,10 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
assert original_series[:] is not original_series
original_series[:3] = [7, 8, 9]
- assert all(sliced_series[:3] == [7, 8, 9])
+ if using_copy_on_write:
+ assert all(sliced_series[:3] == [1, 2, 3])
+ else:
+ assert all(sliced_series[:3] == [7, 8, 9])
@pytest.mark.xfail(reason="accidental fix reverted - GH37497")
def test_loc_copy_vs_view(self):
@@ -2558,7 +2567,7 @@ def test_loc_setitem_boolean_and_column(self, float_frame):
tm.assert_frame_equal(float_frame, expected)
- def test_loc_setitem_ndframe_values_alignment(self):
+ def test_loc_setitem_ndframe_values_alignment(self, using_copy_on_write):
# GH#45501
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df.loc[[False, False, True], ["a"]] = DataFrame(
@@ -2579,9 +2588,13 @@ def test_loc_setitem_ndframe_values_alignment(self):
tm.assert_frame_equal(df, expected)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+ df_orig = df.copy()
ser = df["a"]
ser.loc[[False, False, True]] = Series([10, 11, 12], index=[2, 1, 0])
- tm.assert_frame_equal(df, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ tm.assert_frame_equal(df, expected)
class TestLocListlike:
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 2f3b569c899e1..e4f3b8847238f 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -744,7 +744,7 @@ def test_reindex_items(self):
mgr.iget(3).internal_values(), reindexed.iget(3).internal_values()
)
- def test_get_numeric_data(self):
+ def test_get_numeric_data(self, using_copy_on_write):
mgr = create_mgr(
"int: int; float: float; complex: complex;"
"str: object; bool: bool; obj: object; dt: datetime",
@@ -765,10 +765,16 @@ def test_get_numeric_data(self):
np.array([100.0, 200.0, 300.0]),
inplace=True,
)
- tm.assert_almost_equal(
- mgr.iget(mgr.items.get_loc("float")).internal_values(),
- np.array([100.0, 200.0, 300.0]),
- )
+ if using_copy_on_write:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([1.0, 1.0, 1.0]),
+ )
+ else:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([100.0, 200.0, 300.0]),
+ )
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
@@ -777,12 +783,18 @@ def test_get_numeric_data(self):
np.array([1000.0, 2000.0, 3000.0]),
inplace=True,
)
- tm.assert_almost_equal(
- mgr.iget(mgr.items.get_loc("float")).internal_values(),
- np.array([100.0, 200.0, 300.0]),
- )
+ if using_copy_on_write:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([1.0, 1.0, 1.0]),
+ )
+ else:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([100.0, 200.0, 300.0]),
+ )
- def test_get_bool_data(self):
+ def test_get_bool_data(self, using_copy_on_write):
msg = "object-dtype columns with all-bool values"
mgr = create_mgr(
"int: int; float: float; complex: complex;"
@@ -800,19 +812,31 @@ def test_get_bool_data(self):
)
bools.iset(0, np.array([True, False, True]), inplace=True)
- tm.assert_numpy_array_equal(
- mgr.iget(mgr.items.get_loc("bool")).internal_values(),
- np.array([True, False, True]),
- )
+ if using_copy_on_write:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, True, True]),
+ )
+ else:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, False, True]),
+ )
# Check sharing
with tm.assert_produces_warning(FutureWarning, match=msg):
bools2 = mgr.get_bool_data(copy=True)
bools2.iset(0, np.array([False, True, False]))
- tm.assert_numpy_array_equal(
- mgr.iget(mgr.items.get_loc("bool")).internal_values(),
- np.array([True, False, True]),
- )
+ if using_copy_on_write:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, True, True]),
+ )
+ else:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, False, True]),
+ )
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr("b,\u05d0: object"))
diff --git a/pandas/tests/reshape/test_from_dummies.py b/pandas/tests/reshape/test_from_dummies.py
index c52331e54f95e..ab80473725288 100644
--- a/pandas/tests/reshape/test_from_dummies.py
+++ b/pandas/tests/reshape/test_from_dummies.py
@@ -164,7 +164,7 @@ def test_error_with_prefix_default_category_dict_not_complete(
def test_error_with_prefix_contains_nan(dummies_basic):
- dummies_basic["col2_c"][2] = np.nan
+ dummies_basic.loc[2, "col2_c"] = np.nan
with pytest.raises(
ValueError, match=r"Dummy DataFrame contains NA value in column: 'col2_c'"
):
@@ -172,7 +172,7 @@ def test_error_with_prefix_contains_nan(dummies_basic):
def test_error_with_prefix_contains_non_dummies(dummies_basic):
- dummies_basic["col2_c"][2] = "str"
+ dummies_basic.loc[2, "col2_c"] = "str"
with pytest.raises(TypeError, match=r"Passed DataFrame contains non-dummy data"):
from_dummies(dummies_basic, sep="_")
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py
index afb6d0f19daca..adee227c0f0ac 100644
--- a/pandas/tests/series/accessors/test_dt_accessor.py
+++ b/pandas/tests/series/accessors/test_dt_accessor.py
@@ -279,7 +279,7 @@ def test_dt_accessor_ambiguous_freq_conversions(self):
expected = Series(exp_values, name="xxx")
tm.assert_series_equal(ser, expected)
- def test_dt_accessor_not_writeable(self):
+ def test_dt_accessor_not_writeable(self, using_copy_on_write):
# no setting allowed
ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")
with pytest.raises(ValueError, match="modifications"):
@@ -288,8 +288,12 @@ def test_dt_accessor_not_writeable(self):
# trying to set a copy
msg = "modifications to a property of a datetimelike.+not supported"
with pd.option_context("chained_assignment", "raise"):
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) it would be nice to keep a warning/error for this case
ser.dt.hour[0] = 5
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ ser.dt.hour[0] = 5
@pytest.mark.parametrize(
"method, dates",
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 7a3e18c64f366..6ddffd0d006dc 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -209,7 +209,8 @@ def test_basic_getitem_setitem_corner(datetime_series):
datetime_series[[5, slice(None, None)]] = 2
-def test_slice(string_series, object_series):
+def test_slice(string_series, object_series, using_copy_on_write):
+ original = string_series.copy()
numSlice = string_series[10:20]
numSliceEnd = string_series[-10:]
objSlice = object_series[10:20]
@@ -227,7 +228,11 @@ def test_slice(string_series, object_series):
sl = string_series[10:20]
sl[:] = 0
- assert (string_series[10:20] == 0).all()
+ if using_copy_on_write:
+ # Doesn't modify parent (CoW)
+ tm.assert_series_equal(string_series, original)
+ else:
+ assert (string_series[10:20] == 0).all()
def test_timedelta_assignment():
@@ -244,7 +249,7 @@ def test_timedelta_assignment():
tm.assert_series_equal(s, expected)
-def test_underlying_data_conversion():
+def test_underlying_data_conversion(using_copy_on_write):
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]})
msg = "The 'inplace' keyword"
@@ -253,15 +258,19 @@ def test_underlying_data_conversion():
assert return_value is None
s = Series([1], index=[(2, 2, 2)])
df["val"] = 0
+ df_original = df.copy()
df
df["val"].update(s)
- expected = DataFrame(
- {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- return_value = expected.set_index(["a", "b", "c"], inplace=True)
- assert return_value is None
+ if using_copy_on_write:
+ expected = df_original
+ else:
+ expected = DataFrame(
+ {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ return_value = expected.set_index(["a", "b", "c"], inplace=True)
+ assert return_value is None
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/series/methods/test_copy.py b/pandas/tests/series/methods/test_copy.py
index 8aa5c14812dc0..d681c0d02e0a2 100644
--- a/pandas/tests/series/methods/test_copy.py
+++ b/pandas/tests/series/methods/test_copy.py
@@ -9,20 +9,28 @@
class TestCopy:
- @pytest.mark.parametrize("deep", [None, False, True])
- def test_copy(self, deep):
+ @pytest.mark.parametrize("deep", ["default", None, False, True])
+ def test_copy(self, deep, using_copy_on_write):
ser = Series(np.arange(10), dtype="float64")
# default deep is True
- if deep is None:
+ if deep == "default":
ser2 = ser.copy()
else:
ser2 = ser.copy(deep=deep)
+ if using_copy_on_write:
+ # INFO(CoW) a shallow copy doesn't yet copy the data
+ # but parent will not be modified (CoW)
+ if deep is None or deep is False:
+ assert np.may_share_memory(ser.values, ser2.values)
+ else:
+ assert not np.may_share_memory(ser.values, ser2.values)
+
ser2[::2] = np.NaN
- if deep is None or deep is True:
+ if deep is not False or using_copy_on_write:
# Did not modify original Series
assert np.isnan(ser2[0])
assert not np.isnan(ser[0])
@@ -31,8 +39,8 @@ def test_copy(self, deep):
assert np.isnan(ser2[0])
assert np.isnan(ser[0])
- @pytest.mark.parametrize("deep", [None, False, True])
- def test_copy_tzaware(self, deep):
+ @pytest.mark.parametrize("deep", ["default", None, False, True])
+ def test_copy_tzaware(self, deep, using_copy_on_write):
# GH#11794
# copy of tz-aware
expected = Series([Timestamp("2012/01/01", tz="UTC")])
@@ -40,15 +48,23 @@ def test_copy_tzaware(self, deep):
ser = Series([Timestamp("2012/01/01", tz="UTC")])
- if deep is None:
+ if deep == "default":
ser2 = ser.copy()
else:
ser2 = ser.copy(deep=deep)
+ if using_copy_on_write:
+ # INFO(CoW) a shallow copy doesn't yet copy the data
+ # but parent will not be modified (CoW)
+ if deep is None or deep is False:
+ assert np.may_share_memory(ser.values, ser2.values)
+ else:
+ assert not np.may_share_memory(ser.values, ser2.values)
+
ser2[0] = Timestamp("1999/01/01", tz="UTC")
# default deep is True
- if deep is None or deep is True:
+ if deep is not False or using_copy_on_write:
# Did not modify original Series
tm.assert_series_equal(ser2, expected2)
tm.assert_series_equal(ser, expected)
diff --git a/pandas/tests/series/methods/test_get_numeric_data.py b/pandas/tests/series/methods/test_get_numeric_data.py
index e386f4b5b1dec..60dd64d7e1948 100644
--- a/pandas/tests/series/methods/test_get_numeric_data.py
+++ b/pandas/tests/series/methods/test_get_numeric_data.py
@@ -7,13 +7,20 @@
class TestGetNumericData:
- def test_get_numeric_data_preserve_dtype(self):
+ def test_get_numeric_data_preserve_dtype(self, using_copy_on_write):
# get the numeric data
obj = Series([1, 2, 3])
result = obj._get_numeric_data()
tm.assert_series_equal(result, obj)
+ # returned object is a shallow copy
+ result.iloc[0] = 0
+ if using_copy_on_write:
+ assert obj.iloc[0] == 1
+ else:
+ assert obj.iloc[0] == 0
+
obj = Series([1, "2", 3.0])
result = obj._get_numeric_data()
expected = Series([], dtype=object, index=Index([], dtype=object))
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
index 729c07b8bdde7..d0392929cb082 100644
--- a/pandas/tests/series/methods/test_rename.py
+++ b/pandas/tests/series/methods/test_rename.py
@@ -143,10 +143,15 @@ def test_rename_error_arg(self):
with pytest.raises(KeyError, match=match):
ser.rename({2: 9}, errors="raise")
- def test_rename_copy_false(self):
+ def test_rename_copy_false(self, using_copy_on_write):
# GH 46889
ser = Series(["foo", "bar"])
+ ser_orig = ser.copy()
shallow_copy = ser.rename({1: 9}, copy=False)
ser[0] = "foobar"
- assert ser[0] == shallow_copy[0]
- assert ser[1] == shallow_copy[9]
+ if using_copy_on_write:
+ assert ser_orig[0] == shallow_copy[0]
+ assert ser_orig[1] == shallow_copy[9]
+ else:
+ assert ser[0] == shallow_copy[0]
+ assert ser[1] == shallow_copy[9]
diff --git a/pandas/tests/series/methods/test_update.py b/pandas/tests/series/methods/test_update.py
index d9d6641d54237..6403fcf76122a 100644
--- a/pandas/tests/series/methods/test_update.py
+++ b/pandas/tests/series/methods/test_update.py
@@ -14,7 +14,7 @@
class TestUpdate:
- def test_update(self):
+ def test_update(self, using_copy_on_write):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
@@ -25,11 +25,15 @@ def test_update(self):
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
+ df_orig = df.copy()
df["c"].update(Series(["foo"], index=[0]))
- expected = DataFrame(
- [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
- )
+ if using_copy_on_write:
+ expected = df_orig
+ else:
+ expected = DataFrame(
+ [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
+ )
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
| This is a port of the proof of concept using the ArrayManager in https://github.com/pandas-dev/pandas/pull/41878 to the default BlockManager.
This PR is a start to implement the proposal described in more detail in https://docs.google.com/document/d/1ZCQ9mx3LBMy-nhwRl33_jgcvWo9IWdEfxDNQ2thyTb0/edit / discussed in https://github.com/pandas-dev/pandas/issues/36195
A very brief summary of the behaviour you get:
- *Any* subset (so also a slice, single column access, etc) _behaves_ as a copy (using CoW, or is already a copy)
- DataFrame methods that return a new DataFrame return shallow copies (using CoW) if applicable (for now, this is only implemented / tested for `reset_index` and `rename`, needs to be expanded to other methods)
**Implementation approach**
This PR adds Copy-on-Write (CoW) functionality to the DataFrame/Series at the BlockManager level. It does this by adding a new `.refs` attribute to the `BlockManager` that, if populated, keeps a list of `weakref` references to the blocks it shares data with (so for the BlockManager, this reference tracking is done per block, so `len(mgr.blocks) == len(mgr.refs)`).
This ensures that if we are modifying a block of a child manager, we can check if it is referencing (viewing) another block, and if needed do a copy on write. And also if we are modifying a block of a parent manager, we can check if that block is being referenced by another manager and if needed do a copy on write in this parent frame. (of course, a manager can both be parent and child at the same time, so those two checks always happen both)
---
**How to enable this new behaviour?**
Currently this PR simply enabled the new behaviour with CoW, but of course that will need to be turned off before merging (which also means that some of the changes will need to put behind a feature flag. I only did that now in some places).
I think that ideally, (on the short term) users have a way to enable the future behaviour (eg using an option), but _also_ have a way to enable additional warnings.
I already started adding an option, currently the boolean flag `options.mode.copy_on_write=True|False`:
* Do we have a better name? I personally don't like that it uses "copy_on_write", because this is the internal implementation detail, and not what most end users really have to care about. But something like "new_copy_view_behaviour" is also not super ..
* In addition to True/False, we can probably add "warn" as a third option, which gives warnings in cases where behaviour would change.
---
Some notes:
- Not everything is already implemented (there are a couple of `TODO(CoW)` in the code), although the majority for indexing / setitem is done.
- This PR does not yet try to tackle copy/view behaviour for the constructors, or for numpy array access (`.values`). Given the size of this PR already, those can probably be done in separate PRs?
- Most tests are already passing (with changes), but still need to fix a few tests outside of /indexing
- We will also need to think about a way to test this (in a similar way as the ArrayManager with an environment variable?)
I will also pull out some of the changes in separate PRs (eg the new test file could already be discussed/reviewed separately (-> https://github.com/pandas-dev/pandas/pull/46979), and the `column_setitem` is maybe also something that could be done as pre-cursor(-> https://github.com/pandas-dev/pandas/pull/47074)) | https://api.github.com/repos/pandas-dev/pandas/pulls/46958 | 2022-05-06T21:54:32Z | 2022-08-20T18:45:23Z | 2022-08-20T18:45:23Z | 2022-10-07T08:43:02Z |
DOC: fix cookbook groupby & transform example | diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index d0b2119f9d315..daf5a0e481b8e 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -511,7 +511,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
def replace(g):
mask = g < 0
- return g.where(mask, g[~mask].mean())
+ return g.where(~mask, g[~mask].mean())
gb.transform(replace)
| In the example, all negative values of a group should be replaced by the mean of the rest of the group. The linked stackoverflow output has it right: https://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means
We have to negate the first argument of `DataFrame.where` here, since this should be the condition when the value stays the same (and this should be the case for `g>=0`.)
pandas `df.where` behavior is pretty weird when coming from Spark world :)
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46957 | 2022-05-06T20:24:27Z | 2022-05-07T02:24:07Z | 2022-05-07T02:24:07Z | 2022-05-07T02:24:15Z |
ENH: plot.scatter passes errorbar specific keyword arguments to ax.er… | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 2efc6c9167a83..76ef011b4fa31 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -713,6 +713,7 @@ Plotting
- Bug in :meth:`DataFrame.boxplot` that prevented specifying ``vert=False`` (:issue:`36918`)
- Bug in :meth:`DataFrame.plot.scatter` that prevented specifying ``norm`` (:issue:`45809`)
- The function :meth:`DataFrame.plot.scatter` now accepts ``color`` as an alias for ``c`` and ``size`` as an alias for ``s`` for consistency to other plotting functions (:issue:`44670`)
+- The function :meth:`DataFrame.plot.scatter` now accepts matplotlib errorbar keywords to allow customizating the axis errorbar (:issue:`46952`)
- Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`)
Groupby/resample/rolling
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 3899885bd95f1..d8663a62a12ef 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1,8 +1,11 @@
from __future__ import annotations
+import inspect
from typing import (
TYPE_CHECKING,
Hashable,
+ Iterable,
+ Sequence,
)
import warnings
@@ -102,7 +105,7 @@ def __init__(
data,
kind=None,
by: IndexLabel | None = None,
- subplots=False,
+ subplots: bool | Sequence[Sequence[str]] = False,
sharex=None,
sharey=False,
use_index=True,
@@ -166,8 +169,7 @@ def __init__(
self.kind = kind
self.sort_columns = sort_columns
-
- self.subplots = subplots
+ self.subplots = self._validate_subplots_kwarg(subplots)
if sharex is None:
@@ -253,6 +255,112 @@ def __init__(
self._validate_color_args()
+ def _validate_subplots_kwarg(
+ self, subplots: bool | Sequence[Sequence[str]]
+ ) -> bool | list[tuple[int, ...]]:
+ """
+ Validate the subplots parameter
+
+ - check type and content
+ - check for duplicate columns
+ - check for invalid column names
+ - convert column names into indices
+ - add missing columns in a group of their own
+ See comments in code below for more details.
+
+ Parameters
+ ----------
+ subplots : subplots parameters as passed to PlotAccessor
+
+ Returns
+ -------
+ validated subplots : a bool or a list of tuples of column indices. Columns
+ in the same tuple will be grouped together in the resulting plot.
+ """
+
+ if isinstance(subplots, bool):
+ return subplots
+ elif not isinstance(subplots, Iterable):
+ raise ValueError("subplots should be a bool or an iterable")
+
+ supported_kinds = (
+ "line",
+ "bar",
+ "barh",
+ "hist",
+ "kde",
+ "density",
+ "area",
+ "pie",
+ )
+ if self._kind not in supported_kinds:
+ raise ValueError(
+ "When subplots is an iterable, kind must be "
+ f"one of {', '.join(supported_kinds)}. Got {self._kind}."
+ )
+
+ if isinstance(self.data, ABCSeries):
+ raise NotImplementedError(
+ "An iterable subplots for a Series is not supported."
+ )
+
+ columns = self.data.columns
+ if isinstance(columns, ABCMultiIndex):
+ raise NotImplementedError(
+ "An iterable subplots for a DataFrame with a MultiIndex column "
+ "is not supported."
+ )
+
+ if columns.nunique() != len(columns):
+ raise NotImplementedError(
+ "An iterable subplots for a DataFrame with non-unique column "
+ "labels is not supported."
+ )
+
+ # subplots is a list of tuples where each tuple is a group of
+ # columns to be grouped together (one ax per group).
+ # we consolidate the subplots list such that:
+ # - the tuples contain indices instead of column names
+ # - the columns that aren't yet in the list are added in a group
+ # of their own.
+ # For example with columns from a to g, and
+ # subplots = [(a, c), (b, f, e)],
+ # we end up with [(ai, ci), (bi, fi, ei), (di,), (gi,)]
+ # This way, we can handle self.subplots in a homogeneous manner
+ # later.
+ # TODO: also accept indices instead of just names?
+
+ out = []
+ seen_columns: set[Hashable] = set()
+ for group in subplots:
+ if not is_list_like(group):
+ raise ValueError(
+ "When subplots is an iterable, each entry "
+ "should be a list/tuple of column names."
+ )
+ idx_locs = columns.get_indexer_for(group)
+ if (idx_locs == -1).any():
+ bad_labels = np.extract(idx_locs == -1, group)
+ raise ValueError(
+ f"Column label(s) {list(bad_labels)} not found in the DataFrame."
+ )
+ else:
+ unique_columns = set(group)
+ duplicates = seen_columns.intersection(unique_columns)
+ if duplicates:
+ raise ValueError(
+ "Each column should be in only one subplot. "
+ f"Columns {duplicates} were found in multiple subplots."
+ )
+ seen_columns = seen_columns.union(unique_columns)
+ out.append(tuple(idx_locs))
+
+ unseen_columns = columns.difference(seen_columns)
+ for column in unseen_columns:
+ idx_loc = columns.get_loc(column)
+ out.append((idx_loc,))
+ return out
+
def _validate_color_args(self):
if (
"color" in self.kwds
@@ -371,8 +479,11 @@ def _maybe_right_yaxis(self, ax: Axes, axes_num):
def _setup_subplots(self):
if self.subplots:
+ naxes = (
+ self.nseries if isinstance(self.subplots, bool) else len(self.subplots)
+ )
fig, axes = create_subplots(
- naxes=self.nseries,
+ naxes=naxes,
sharex=self.sharex,
sharey=self.sharey,
figsize=self.figsize,
@@ -784,9 +895,23 @@ def _get_ax_layer(cls, ax, primary=True):
else:
return getattr(ax, "right_ax", ax)
+ def _col_idx_to_axis_idx(self, col_idx: int) -> int:
+ """Return the index of the axis where the column at col_idx should be plotted"""
+ if isinstance(self.subplots, list):
+ # Subplots is a list: some columns will be grouped together in the same ax
+ return next(
+ group_idx
+ for (group_idx, group) in enumerate(self.subplots)
+ if col_idx in group
+ )
+ else:
+ # subplots is True: one ax per column
+ return col_idx
+
def _get_ax(self, i: int):
# get the twinx ax if appropriate
if self.subplots:
+ i = self._col_idx_to_axis_idx(i)
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
@@ -1089,6 +1214,35 @@ def _make_plot(self):
plot_colorbar = self.colormap or c_is_column
cb = self.kwds.pop("colorbar", is_numeric_dtype(c_values) and plot_colorbar)
+ # move matplotlib.errorbar specific keywords that are not accepted by
+ # ax.scatter to an err_kwds dict for use in the call to ax.errorbar
+ ebarkeys = {
+ param
+ for param in inspect.signature(ax.errorbar).parameters
+ if param not in ["x", "xerr", "y", "yerr", "kwargs"]
+ }
+ # These errorbar kwargs are useful with the fmt and errorrevery
+ # parameters.
+ ebarkeys |= {
+ "markevery",
+ "ms",
+ "markersize",
+ "mfc",
+ "markerfacecolor",
+ "mec",
+ "markeredgecolor",
+ "mew",
+ "markeredgewidth",
+ }
+ err_kwds = {
+ ebarkey: self.kwds.pop(ebarkey) for ebarkey in set(self.kwds) & ebarkeys
+ }
+ if err_kwds.get("fmt") is None:
+ err_kwds["linestyle"] = "none"
+ elif err_kwds.get("fmt", "").casefold() == "none":
+ # set point size to 0 for ax.scatter to only show errorbars
+ self.kwds["s"] = 0
+
if self.legend and hasattr(self, "label"):
label = self.label
else:
@@ -1117,9 +1271,14 @@ def _make_plot(self):
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
- err_kwds = dict(errors_x, **errors_y)
- err_kwds["ecolor"] = scatter.get_facecolor()[0]
- ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds)
+ err_kwds = dict(err_kwds, **errors_x, **errors_y)
+ if "ecolor" not in err_kwds:
+ try:
+ err_kwds["ecolor"] = scatter.get_facecolor()[0]
+ except IndexError:
+ # no facecolor so use edgecolor
+ err_kwds["ecolor"] = scatter.get_edgecolor()[0]
+ ax.errorbar(data[x].values, data[y].values, **err_kwds)
class HexBinPlot(PlanePlot):
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index c4ce0b256cd41..7c07ef574fc97 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -661,6 +661,57 @@ def test_plot_scatter(self):
axes = df.plot(x="x", y="y", kind="scatter", subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+ def test_plot_scatter_errorbar_kwd(self):
+ df = DataFrame(
+ np.random.randn(6, 4),
+ index=list(string.ascii_letters[:6]),
+ columns=["x", "y", "xerr", "yerr"],
+ )
+
+ _check_plot_works(
+ df.plot.scatter, x="x", y="y", xerr="xerr", yerr="yerr", fc="none", ec="b"
+ )
+
+ _check_plot_works(
+ df.plot.scatter,
+ x="x",
+ y="y",
+ xerr="xerr",
+ yerr="yerr",
+ barsabove=True,
+ capsize=3,
+ capthick=1.5,
+ ecolor="k",
+ elinewidth=1.5,
+ errorevery=1,
+ fmt="none",
+ ms=5,
+ lolims=True,
+ uplims=False,
+ xlolims=False,
+ xuplims=True,
+ )
+
+ _check_plot_works(
+ df.plot.scatter,
+ x="x",
+ y="y",
+ xerr="xerr",
+ yerr="yerr",
+ barsabove=True,
+ capsize=3,
+ capthick=1.5,
+ ecolor="k",
+ elinewidth=1.5,
+ errorevery=1,
+ fmt="gs--",
+ markersize=5,
+ lolims=True,
+ uplims=False,
+ xlolims=False,
+ xuplims=True,
+ )
+
def test_raise_error_on_datetime_time_data(self):
# GH 8113, datetime.time type is not supported by matplotlib in scatter
df = DataFrame(np.random.randn(10), columns=["a"])
| …rorbar() (#46952)
- [x] closes #46952 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46954 | 2022-05-06T02:52:24Z | 2022-08-08T23:51:41Z | null | 2022-08-08T23:51:42Z |
CLN: Remove special case for rank in groupby.ops | diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 03f318d08d8cb..7f5fe85e07f40 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -1325,8 +1325,8 @@ def group_rank(
mask=sub_mask,
)
for i in range(len(result)):
- # TODO: why can't we do out[:, k] = result?
- out[i, k] = result[i]
+ if labels[i] >= 0:
+ out[i, k] = result[i]
# ----------------------------------------------------------------------
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index a769c92e0b542..7285824f0111f 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -623,10 +623,6 @@ def _call_cython_op(
result = result.T
- if self.how == "rank" and self.has_dropped_na:
- # TODO: Wouldn't need this if group_rank supported mask
- result = np.where(comp_ids < 0, np.nan, result)
-
if self.how not in self.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46953 | 2022-05-06T02:46:48Z | 2022-05-06T17:02:37Z | 2022-05-06T17:02:37Z | 2022-05-06T17:06:08Z |
Fix for issue #46870 DataFrame.select_dtypes(include='number') includes BooleanDtype | diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 35b9de3f7af93..626e46483b14d 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -99,7 +99,7 @@ def _is_boolean(self) -> bool:
@property
def _is_numeric(self) -> bool:
- return True
+ return False
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
| - [x] closes #46870 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46951 | 2022-05-06T02:22:03Z | 2022-06-22T09:59:34Z | null | 2022-06-22T09:59:34Z |
WIP: CI: Debug Windows recurssion error | diff --git a/.github/workflows/assign.yml b/.github/workflows/assign.yml
deleted file mode 100644
index a1812843b1a8f..0000000000000
--- a/.github/workflows/assign.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Assign
-on:
- issue_comment:
- types: created
-
-jobs:
- issue_assign:
- runs-on: ubuntu-latest
- steps:
- - if: github.event.comment.body == 'take'
- run: |
- echo "Assigning issue ${{ github.event.issue.number }} to ${{ github.event.comment.user.login }}"
- curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -d '{"assignees": ["${{ github.event.comment.user.login }}"]}' https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/assignees
diff --git a/.github/workflows/asv-bot.yml b/.github/workflows/asv-bot.yml
deleted file mode 100644
index 78c224b84d5d9..0000000000000
--- a/.github/workflows/asv-bot.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-name: "ASV Bot"
-
-on:
- issue_comment: # Pull requests are issues
- types:
- - created
-
-env:
- ENV_FILE: environment.yml
- COMMENT: ${{github.event.comment.body}}
-
-jobs:
- autotune:
- name: "Run benchmarks"
- # TODO: Support more benchmarking options later, against different branches, against self, etc
- if: startsWith(github.event.comment.body, '@github-actions benchmark')
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -el {0}
-
- concurrency:
- # Set concurrency to prevent abuse(full runs are ~5.5 hours !!!)
- # each user can only run one concurrent benchmark bot at a time
- # We don't cancel in progress jobs, but if you want to benchmark multiple PRs, you're gonna have
- # to wait
- group: ${{ github.actor }}-asv
- cancel-in-progress: false
-
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Cache conda
- uses: actions/cache@v3
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }}
-
- # Although asv sets up its own env, deps are still needed
- # during discovery process
- - uses: conda-incubator/setup-miniconda@v2.1.1
- with:
- activate-environment: pandas-dev
- channel-priority: strict
- environment-file: ${{ env.ENV_FILE }}
- use-only-tar-bz2: true
-
- - name: Run benchmarks
- id: bench
- continue-on-error: true # This is a fake failure, asv will exit code 1 for regressions
- run: |
- # extracting the regex, see https://stackoverflow.com/a/36798723
- REGEX=$(echo "$COMMENT" | sed -n "s/^.*-b\s*\(\S*\).*$/\1/p")
- cd asv_bench
- asv check -E existing
- git remote add upstream https://github.com/pandas-dev/pandas.git
- git fetch upstream
- asv machine --yes
- asv continuous -f 1.1 -b $REGEX upstream/main HEAD
- echo 'BENCH_OUTPUT<<EOF' >> $GITHUB_ENV
- asv compare -f 1.1 upstream/main HEAD >> $GITHUB_ENV
- echo 'EOF' >> $GITHUB_ENV
- echo "REGEX=$REGEX" >> $GITHUB_ENV
-
- - uses: actions/github-script@v6
- env:
- BENCH_OUTPUT: ${{env.BENCH_OUTPUT}}
- REGEX: ${{env.REGEX}}
- with:
- script: |
- const ENV_VARS = process.env
- const run_url = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`
- github.rest.issues.createComment({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
- body: '\nBenchmarks completed. View runner logs here.' + run_url + '\nRegex used: '+ 'regex ' + ENV_VARS["REGEX"] + '\n' + ENV_VARS["BENCH_OUTPUT"]
- })
diff --git a/.github/workflows/autoupdate-pre-commit-config.yml b/.github/workflows/autoupdate-pre-commit-config.yml
deleted file mode 100644
index d2eac234ca361..0000000000000
--- a/.github/workflows/autoupdate-pre-commit-config.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-name: "Update pre-commit config"
-
-on:
- schedule:
- - cron: "0 7 1 * *" # At 07:00 on 1st of every month.
- workflow_dispatch:
-
-jobs:
- update-pre-commit:
- if: github.repository_owner == 'pandas-dev'
- name: Autoupdate pre-commit config
- runs-on: ubuntu-latest
- steps:
- - name: Set up Python
- uses: actions/setup-python@v3
- - name: Cache multiple paths
- uses: actions/cache@v3
- with:
- path: |
- ~/.cache/pre-commit
- ~/.cache/pip
- key: pre-commit-autoupdate-${{ runner.os }}-build
- - name: Update pre-commit config packages
- uses: technote-space/create-pr-action@v2
- with:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- EXECUTE_COMMANDS: |
- pip install pre-commit
- pre-commit autoupdate || (exit 0);
- pre-commit run -a || (exit 0);
- COMMIT_MESSAGE: "⬆️ UPGRADE: Autoupdate pre-commit config"
- PR_BRANCH_NAME: "pre-commit-config-update-${PR_ID}"
- PR_TITLE: "⬆️ UPGRADE: Autoupdate pre-commit config"
diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
deleted file mode 100644
index a27ed42c984bf..0000000000000
--- a/.github/workflows/code-checks.yml
+++ /dev/null
@@ -1,172 +0,0 @@
-name: Code Checks
-
-on:
- push:
- branches:
- - main
- - 1.4.x
- pull_request:
- branches:
- - main
- - 1.4.x
-
-env:
- ENV_FILE: environment.yml
- PANDAS_CI: 1
-
-jobs:
- pre_commit:
- name: pre-commit
- runs-on: ubuntu-latest
- concurrency:
- # https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-pre-commit
- cancel-in-progress: true
- steps:
- - name: Checkout
- uses: actions/checkout@v3
-
- - name: Install Python
- uses: actions/setup-python@v3
- with:
- python-version: '3.9.7'
-
- - name: Run pre-commit
- uses: pre-commit/action@v2.0.3
-
- typing_and_docstring_validation:
- name: Docstring and typing validation
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -el {0}
-
- concurrency:
- # https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-code-checks
- cancel-in-progress: true
-
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Cache conda
- uses: actions/cache@v3
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }}
-
- - uses: conda-incubator/setup-miniconda@v2.1.1
- with:
- mamba-version: "*"
- channels: conda-forge
- activate-environment: pandas-dev
- channel-priority: strict
- environment-file: ${{ env.ENV_FILE }}
- use-only-tar-bz2: true
-
- - name: Install node.js (for pyright)
- uses: actions/setup-node@v3
- with:
- node-version: "16"
-
- - name: Install pyright
- # note: keep version in sync with .pre-commit-config.yaml
- run: npm install -g pyright@1.1.230
-
- - name: Build Pandas
- id: build
- uses: ./.github/actions/build_pandas
-
- - name: Check for no warnings when building single-page docs
- run: ci/code_checks.sh single-docs
- if: ${{ steps.build.outcome == 'success' }}
-
- - name: Run checks on imported code
- run: ci/code_checks.sh code
- if: ${{ steps.build.outcome == 'success' }}
-
- - name: Run doctests
- run: ci/code_checks.sh doctests
- if: ${{ steps.build.outcome == 'success' }}
-
- - name: Run docstring validation
- run: ci/code_checks.sh docstrings
- if: ${{ steps.build.outcome == 'success' }}
-
- - name: Run typing validation
- run: ci/code_checks.sh typing
- if: ${{ steps.build.outcome == 'success' }}
-
- - name: Run docstring validation script tests
- run: pytest scripts
- if: ${{ steps.build.outcome == 'success' }}
-
- asv-benchmarks:
- name: ASV Benchmarks
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -el {0}
-
- concurrency:
- # https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-asv-benchmarks
- cancel-in-progress: true
-
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Cache conda
- uses: actions/cache@v3
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }}
-
- - uses: conda-incubator/setup-miniconda@v2.1.1
- with:
- mamba-version: "*"
- channels: conda-forge
- activate-environment: pandas-dev
- channel-priority: strict
- environment-file: ${{ env.ENV_FILE }}
- use-only-tar-bz2: true
-
- - name: Build Pandas
- id: build
- uses: ./.github/actions/build_pandas
-
- - name: Run ASV benchmarks
- run: |
- cd asv_bench
- asv machine --yes
- asv run --quick --dry-run --strict --durations=30 --python=same
-
- build_docker_dev_environment:
- name: Build Docker Dev Environment
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -el {0}
-
- concurrency:
- # https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-build_docker_dev_environment
- cancel-in-progress: true
-
- steps:
- - name: Clean up dangling images
- run: docker image prune -f
-
- - name: Checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Build image
- run: docker build --pull --no-cache --tag pandas-dev-env .
diff --git a/.github/workflows/comment_bot.yml b/.github/workflows/comment_bot.yml
deleted file mode 100644
index 3824e015e8336..0000000000000
--- a/.github/workflows/comment_bot.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: Comment-bot
-
-on:
- issue_comment:
- types:
- - created
- - edited
-
-jobs:
- autotune:
- name: "Fixup pre-commit formatting"
- if: startsWith(github.event.comment.body, '@github-actions pre-commit')
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: r-lib/actions/pr-fetch@v2
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- - name: Cache multiple paths
- uses: actions/cache@v3
- with:
- path: |
- ~/.cache/pre-commit
- ~/.cache/pip
- key: pre-commit-dispatched-${{ runner.os }}-build
- - uses: actions/setup-python@v3
- with:
- python-version: 3.8
- - name: Install-pre-commit
- run: python -m pip install --upgrade pre-commit
- - name: Run pre-commit
- run: pre-commit run --from-ref=origin/main --to-ref=HEAD --all-files || (exit 0)
- - name: Commit results
- run: |
- git config user.name "$(git log -1 --pretty=format:%an)"
- git config user.email "$(git log -1 --pretty=format:%ae)"
- git commit -a -m 'Fixes from pre-commit [automated commit]' || echo "No changes to commit"
- - uses: r-lib/actions/pr-push@v2
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
deleted file mode 100644
index 8c2c86dc693a9..0000000000000
--- a/.github/workflows/docbuild-and-upload.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-name: Doc Build and Upload
-
-on:
- push:
- branches:
- - main
- - 1.4.x
- pull_request:
- branches:
- - main
- - 1.4.x
-
-env:
- ENV_FILE: environment.yml
- PANDAS_CI: 1
-
-jobs:
- web_and_docs:
- name: Doc Build and Upload
- runs-on: ubuntu-latest
-
- concurrency:
- # https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-web-docs
- cancel-in-progress: true
-
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Set up pandas
- uses: ./.github/actions/setup
-
- - name: Build website
- run: |
- source activate pandas-dev
- python web/pandas_web.py web/pandas --target-path=web/build
-
- - name: Build documentation
- run: |
- source activate pandas-dev
- doc/make.py --warnings-are-errors
-
- - name: Install ssh key
- run: |
- mkdir -m 700 -p ~/.ssh
- echo "${{ secrets.server_ssh_key }}" > ~/.ssh/id_rsa
- chmod 600 ~/.ssh/id_rsa
- echo "${{ secrets.server_ip }} ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE1Kkopomm7FHG5enATf7SgnpICZ4W2bw+Ho+afqin+w7sMcrsa0je7sbztFAV8YchDkiBKnWTG4cRT+KZgZCaY=" > ~/.ssh/known_hosts
- if: ${{github.event_name == 'push' && github.ref == 'refs/heads/main'}}
-
- - name: Copy cheatsheets into site directory
- run: cp doc/cheatsheet/Pandas_Cheat_Sheet* web/build/
-
- - name: Upload web
- run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas
- if: ${{github.event_name == 'push' && github.ref == 'refs/heads/main'}}
-
- - name: Upload dev docs
- run: rsync -az --delete doc/build/html/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas/pandas-docs/dev
- if: ${{github.event_name == 'push' && github.ref == 'refs/heads/main'}}
-
- - name: Move docs into site directory
- run: mv doc/build/html web/build/docs
-
- - name: Save website as an artifact
- uses: actions/upload-artifact@v3
- with:
- name: website
- path: web/build
- retention-days: 14
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
deleted file mode 100644
index f5cbb0e88ff11..0000000000000
--- a/.github/workflows/posix.yml
+++ /dev/null
@@ -1,200 +0,0 @@
-name: Posix
-
-on:
- push:
- branches:
- - main
- - 1.4.x
- pull_request:
- branches:
- - main
- - 1.4.x
- paths-ignore:
- - "doc/**"
-
-env:
- PANDAS_CI: 1
-
-jobs:
- pytest:
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -el {0}
- timeout-minutes: 120
- strategy:
- matrix:
- env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
- pattern: ["not single_cpu", "single_cpu"]
- # Don't test pyarrow v2/3: Causes timeouts in read_csv engine
- # even if tests are skipped/xfailed
- pyarrow_version: ["5", "6", "7"]
- include:
- - name: "Downstream Compat"
- env_file: actions-38-downstream_compat.yaml
- pattern: "not slow and not network and not single_cpu"
- pytest_target: "pandas/tests/test_downstream.py"
- - name: "Minimum Versions"
- env_file: actions-38-minimum_versions.yaml
- pattern: "not slow and not network and not single_cpu"
- - name: "Locale: it_IT.utf8"
- env_file: actions-38.yaml
- pattern: "not slow and not network and not single_cpu"
- extra_apt: "language-pack-it"
- lang: "it_IT.utf8"
- lc_all: "it_IT.utf8"
- - name: "Locale: zh_CN.utf8"
- env_file: actions-38.yaml
- pattern: "not slow and not network and not single_cpu"
- extra_apt: "language-pack-zh-hans"
- lang: "zh_CN.utf8"
- lc_all: "zh_CN.utf8"
- - name: "Data Manager"
- env_file: actions-38.yaml
- pattern: "not slow and not network and not single_cpu"
- pandas_data_manager: "array"
- - name: "Pypy"
- env_file: actions-pypy-38.yaml
- pattern: "not slow and not network and not single_cpu"
- test_args: "--max-worker-restart 0"
- - name: "Numpy Dev"
- env_file: actions-310-numpydev.yaml
- pattern: "not slow and not network and not single_cpu"
- pandas_testing_mode: "deprecate"
- test_args: "-W error::DeprecationWarning:numpy"
- exclude:
- - env_file: actions-39.yaml
- pyarrow_version: "6"
- - env_file: actions-39.yaml
- pyarrow_version: "7"
- - env_file: actions-310.yaml
- pyarrow_version: "6"
- - env_file: actions-310.yaml
- pyarrow_version: "7"
- fail-fast: false
- name: ${{ matrix.name || format('{0} pyarrow={1} {2}', matrix.env_file, matrix.pyarrow_version, matrix.pattern) }}
- env:
- ENV_FILE: ci/deps/${{ matrix.env_file }}
- PATTERN: ${{ matrix.pattern }}
- EXTRA_APT: ${{ matrix.extra_apt || '' }}
- LANG: ${{ matrix.lang || '' }}
- LC_ALL: ${{ matrix.lc_all || '' }}
- PANDAS_TESTING_MODE: ${{ matrix.pandas_testing_mode || '' }}
- PANDAS_DATA_MANAGER: ${{ matrix.pandas_data_manager || 'block' }}
- TEST_ARGS: ${{ matrix.test_args || '' }}
- PYTEST_WORKERS: ${{ contains(matrix.pattern, 'not single_cpu') && 'auto' || '1' }}
- PYTEST_TARGET: ${{ matrix.pytest_target || 'pandas' }}
- IS_PYPY: ${{ contains(matrix.env_file, 'pypy') }}
- # TODO: re-enable coverage on pypy, its slow
- COVERAGE: ${{ !contains(matrix.env_file, 'pypy') }}
- concurrency:
- # https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.pattern }}-${{ matrix.pyarrow_version || '' }}-${{ matrix.extra_apt || '' }}-${{ matrix.pandas_data_manager || '' }}
- cancel-in-progress: true
-
- services:
- mysql:
- image: mysql
- env:
- MYSQL_ALLOW_EMPTY_PASSWORD: yes
- MYSQL_DATABASE: pandas
- options: >-
- --health-cmd "mysqladmin ping"
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 3306:3306
-
- postgres:
- image: postgres
- env:
- POSTGRES_USER: postgres
- POSTGRES_PASSWORD: postgres
- POSTGRES_DB: pandas
- options: >-
- --health-cmd pg_isready
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 5432:5432
-
- moto:
- image: motoserver/moto
- env:
- AWS_ACCESS_KEY_ID: foobar_key
- AWS_SECRET_ACCESS_KEY: foobar_secret
- ports:
- - 5000:5000
-
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Cache conda
- uses: actions/cache@v3
- env:
- CACHE_NUMBER: 0
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{
- hashFiles('${{ env.ENV_FILE }}') }}
-
- - name: Extra installs
- # xsel for clipboard tests
- run: sudo apt-get update && sudo apt-get install -y libc6-dev-i386 xsel ${{ env.EXTRA_APT }}
-
- - uses: conda-incubator/setup-miniconda@v2.1.1
- with:
- mamba-version: "*"
- channels: conda-forge
- activate-environment: pandas-dev
- channel-priority: flexible
- environment-file: ${{ env.ENV_FILE }}
- use-only-tar-bz2: true
- if: ${{ env.IS_PYPY == 'false' }} # No pypy3.8 support
-
- - name: Upgrade Arrow version
- run: conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=${{ matrix.pyarrow_version }}
- if: ${{ matrix.pyarrow_version }}
-
- - name: Setup PyPy
- uses: actions/setup-python@v3
- with:
- python-version: "pypy-3.8"
- if: ${{ env.IS_PYPY == 'true' }}
-
- - name: Setup PyPy dependencies
- run: |
- # TODO: re-enable cov, its slowing the tests down though
- pip install Cython numpy python-dateutil pytz pytest>=6.0 pytest-xdist>=1.31.0 pytest-asyncio>=0.17 hypothesis>=5.5.3
- if: ${{ env.IS_PYPY == 'true' }}
-
- - name: Build Pandas
- uses: ./.github/actions/build_pandas
-
- - name: Test
- run: ci/run_tests.sh
- # TODO: Don't continue on error for PyPy
- continue-on-error: ${{ env.IS_PYPY == 'true' }}
- if: always()
-
- - name: Build Version
- run: conda list
-
- - name: Publish test results
- uses: actions/upload-artifact@v3
- with:
- name: Test results
- path: test-data.xml
- if: failure()
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v2
- with:
- flags: unittests
- name: codecov-pandas
- fail_ci_if_error: false
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
deleted file mode 100644
index 8ca4cce155e96..0000000000000
--- a/.github/workflows/python-dev.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-# This file is purposely frozen(does not run). DO NOT DELETE IT
-# Unfreeze(by commentingthe if: false() condition) once the
-# next Python Dev version has released beta 1 and both Cython and numpy support it
-# After that Python has released, migrate the workflows to the
-# posix GHA workflows/Azure pipelines and "freeze" this file by
-# uncommenting the if: false() condition
-# Feel free to modify this comment as necessary.
-
-name: Python Dev
-
-on:
- push:
- branches:
- - main
- - 1.4.x
- pull_request:
- branches:
- - main
- - 1.4.x
- paths-ignore:
- - "doc/**"
-
-env:
- PYTEST_WORKERS: "auto"
- PANDAS_CI: 1
- PATTERN: "not slow and not network and not clipboard and not single_cpu"
- COVERAGE: true
- PYTEST_TARGET: pandas
-
-jobs:
- build:
- if: false # Comment this line out to "unfreeze"
- runs-on: ${{ matrix.os }}
- strategy:
- fail-fast: false
- matrix:
- os: [ubuntu-latest, macOS-latest, windows-latest]
-
- name: actions-311-dev
- timeout-minutes: 80
-
- concurrency:
- #https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.os }}-${{ matrix.pytest_target }}-dev
- cancel-in-progress: true
-
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Set up Python Dev Version
- uses: actions/setup-python@v3
- with:
- python-version: '3.11-dev'
-
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- - name: Install dependencies
- shell: bash -el {0}
- run: |
- python -m pip install --upgrade pip "setuptools<60.0.0" wheel
- pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy
- pip install git+https://github.com/nedbat/coveragepy.git
- pip install cython python-dateutil pytz hypothesis pytest>=6.2.5 pytest-xdist pytest-cov
- pip list
-
- - name: Build Pandas
- run: |
- python setup.py build_ext -q -j2
- python -m pip install -e . --no-build-isolation --no-use-pep517
-
- - name: Build Version
- run: |
- python -c "import pandas; pandas.show_versions();"
-
- - name: Test with pytest
- shell: bash -el {0}
- run: |
- ci/run_tests.sh
-
- - name: Publish test results
- uses: actions/upload-artifact@v3
- with:
- name: Test results
- path: test-data.xml
- if: failure()
-
- - name: Report Coverage
- run: |
- coverage report -m
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v2
- with:
- flags: unittests
- name: codecov-pandas
- fail_ci_if_error: true
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
deleted file mode 100644
index cd19eb7641c8c..0000000000000
--- a/.github/workflows/sdist.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-name: sdist
-
-on:
- push:
- branches:
- - main
- - 1.4.x
- pull_request:
- branches:
- - main
- - 1.4.x
- types: [labeled, opened, synchronize, reopened]
- paths-ignore:
- - "doc/**"
-
-jobs:
- build:
- if: ${{ github.event.label.name == 'Build' || contains(github.event.pull_request.labels.*.name, 'Build') || github.event_name == 'push'}}
- runs-on: ubuntu-latest
- timeout-minutes: 60
- defaults:
- run:
- shell: bash -el {0}
-
- strategy:
- fail-fast: false
- matrix:
- python-version: ["3.8", "3.9", "3.10"]
- concurrency:
- # https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{matrix.python-version}}-sdist
- cancel-in-progress: true
-
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Set up Python
- uses: actions/setup-python@v3
- with:
- python-version: ${{ matrix.python-version }}
-
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip "setuptools<60.0.0" wheel
-
- # GH 39416
- pip install numpy
-
- - name: Build pandas sdist
- run: |
- pip list
- python setup.py sdist --formats=gztar
-
- - name: Upload sdist artifact
- uses: actions/upload-artifact@v3
- with:
- name: ${{matrix.python-version}}-sdist.gz
- path: dist/*.gz
-
- - uses: conda-incubator/setup-miniconda@v2.1.1
- with:
- activate-environment: pandas-sdist
- channels: conda-forge
- python-version: '${{ matrix.python-version }}'
-
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- - name: Install pandas from sdist
- run: |
- python -m pip install --upgrade "setuptools<60.0.0"
- pip list
- python -m pip install dist/*.gz
-
- - name: Force oldest supported NumPy
- run: |
- case "${{matrix.python-version}}" in
- 3.8)
- pip install numpy==1.18.5 ;;
- 3.9)
- pip install numpy==1.19.3 ;;
- 3.10)
- pip install numpy==1.21.2 ;;
- esac
-
- - name: Import pandas
- run: |
- cd ..
- conda list
- python -c "import pandas; pandas.show_versions();"
diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml
deleted file mode 100644
index b97b60717a2b8..0000000000000
--- a/.github/workflows/stale-pr.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-name: "Stale PRs"
-on:
- schedule:
- # * is a special character in YAML so you have to quote this string
- - cron: "0 0 * * *"
-
-jobs:
- stale:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/stale@v4
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- stale-pr-message: "This pull request is stale because it has been open for thirty days with no activity. Please [update](https://pandas.pydata.org/pandas-docs/stable/development/contributing.html#updating-your-pull-request) and respond to this comment if you're still interested in working on this."
- stale-pr-label: "Stale"
- exempt-pr-labels: "Needs Review,Blocked,Needs Discussion"
- days-before-issue-stale: -1
- days-before-pr-stale: 30
- days-before-close: -1
- remove-stale-when-updated: false
- debug-only: false
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index e6de5caf955fc..5bff8bb412947 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -24,7 +24,7 @@ if [[ $(uname) == "Linux" && -z $DISPLAY ]]; then
XVFB="xvfb-run "
fi
-PYTEST_CMD="${XVFB}pytest -r fEs -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE $PYTEST_TARGET"
+PYTEST_CMD="${XVFB}pytest -v -r fEs -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE $PYTEST_TARGET"
if [[ "$PATTERN" ]]; then
PYTEST_CMD="$PYTEST_CMD -m \"$PATTERN\""
| Example:
```
......x..........................................x.....................................................................................................................x...........xxxWindows fatal exception: stack overflow
Thread 0x000015f0 (most recent call first):
File "C:\Miniconda\envs\pandas-dev\lib\site-packages\execnet\gateway_base.py", line 400 in read
File "C:\Miniconda\envs\pandas-dev\lib\site-packages\execnet\gateway_base.py", line 432 in from_io
File "C:\Miniconda\envs\pandas-dev\lib\site-packages\execnet\gateway_base.py", line 967 in _thread_receiver
File "C:\Miniconda\envs\pandas-dev\lib\site-packages\execnet\gateway_base.py", line 220 in run
File "C:\Miniconda\envs\pandas-dev\lib\site-packages\execnet\gateway_base.py", line 285 in _perform_spawn
Current thread 0x0000101c (most recent call first):
File "D:\a\1\s\pandas\core\construction.py", line 850 in is_empty_data
File "D:\a\1\s\pandas\core\construction.py", line 887 in create_series_with_explicit_dtype
File "D:\a\1\s\pandas\core\series.py", line 520 in _init_dict
File "D:\a\1\s\pandas\core\series.py", line 427 in __init__
File "D:\a\1\s\pandas\core\construction.py", line 889 in create_series_with_explicit_dtype
File "D:\a\1\s\pandas\core\series.py", line 520 in _init_dict
File "D:\a\1\s\pandas\core\series.py", line 427 in __init__
File "D:\a\1\s\pandas\core\construction.py", line 889 in create_series_with_explicit_dtype
File "D:\a\1\s\pandas\core\series.py", line 520 in _init_dict
File "D:\a\1\s\pandas\core\series.py", line 427 in __init__
File "D:\a\1\s\pandas\core\construction.py", line 889 in create_series_with_explicit_dtype
File "D:\a\1\s\pandas\core\series.py", line 520 in _init_dict
File "D:\a\1\s\pandas\core\series.py", line 427 in __init__
File "D:\a\1\s\pandas\core\construction.py", line 889 in create_series_with_explicit_dtype
File "D:\a\1\s\pandas\core\series.py", line 520 in _init_dict
File "D:\a\1\s\pandas\core\construction.py", line 889 in create_series_with_explicit_dtype
File "D:\a\1\s\pandas\core\series.py", line 520 in _init_dict
File "D:\a\1\s\pandas\core\series.py", line 427 in __init__
...
```
And another potential timeout: https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=77073&view=logs&jobId=f64eb602-b0ed-5dcc-0a60-a7288ad39597&j=f64eb602-b0ed-5dcc-0a60-a7288ad39597&t=129bd72d-2137-55bd-1811-c4ce50c6dce8
| https://api.github.com/repos/pandas-dev/pandas/pulls/46950 | 2022-05-05T21:56:33Z | 2022-05-07T21:28:06Z | null | 2022-05-07T21:28:09Z |
CI/DOC: Fix to_hdf docstring validation | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c7013cb95f670..e1459a66a0f12 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2601,11 +2601,6 @@ def to_hdf(
followed by fallback to "fixed".
index : bool, default True
Write DataFrame index as a column.
- errors : str, default 'strict'
- Specifies how encoding and decoding errors are to be handled.
- See the errors argument for :func:`open` for a full list
- of options.
- encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
@@ -2616,8 +2611,15 @@ def to_hdf(
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
- of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
+ of the object are indexed. See
+ :ref:`Query via data columns<io.hdf5-query-data-columns>`. for
+ more information.
Applicable only to format='table'.
+ errors : str, default 'strict'
+ Specifies how encoding and decoding errors are to be handled.
+ See the errors argument for :func:`open` for a full list
+ of options.
+ encoding : str, default "UTF-8"
See Also
--------
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/46949 | 2022-05-05T21:26:14Z | 2022-05-06T02:23:50Z | 2022-05-06T02:23:50Z | 2022-05-06T04:18:47Z |
PERF: Remove unnecessary asof join functions | diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 9238d36e0ee16..cc7d863bf326c 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -839,11 +839,16 @@ def asof_join_nearest_on_X_by_Y(numeric_t[:] left_values,
by_t[:] left_by_values,
by_t[:] right_by_values,
bint allow_exact_matches=True,
- tolerance=None):
+ tolerance=None,
+ bint use_hashtable=True):
cdef:
ndarray[intp_t] bli, bri, fli, fri
+ ndarray[intp_t] left_indexer, right_indexer
+ Py_ssize_t left_size, i
+ numeric_t bdiff, fdiff
+
# search both forward and backward
bli, bri = asof_join_backward_on_X_by_Y(
left_values,
@@ -852,6 +857,7 @@ def asof_join_nearest_on_X_by_Y(numeric_t[:] left_values,
right_by_values,
allow_exact_matches,
tolerance,
+ use_hashtable
)
fli, fri = asof_join_forward_on_X_by_Y(
left_values,
@@ -860,26 +866,11 @@ def asof_join_nearest_on_X_by_Y(numeric_t[:] left_values,
right_by_values,
allow_exact_matches,
tolerance,
+ use_hashtable
)
- return _choose_smaller_timestamp(left_values, right_values, bli, bri, fli, fri)
-
-
-cdef _choose_smaller_timestamp(
- numeric_t[:] left_values,
- numeric_t[:] right_values,
- ndarray[intp_t] bli,
- ndarray[intp_t] bri,
- ndarray[intp_t] fli,
- ndarray[intp_t] fri,
-):
- cdef:
- ndarray[intp_t] left_indexer, right_indexer
- Py_ssize_t left_size, i
- numeric_t bdiff, fdiff
-
+ # choose the smaller timestamp
left_size = len(left_values)
-
left_indexer = np.empty(left_size, dtype=np.intp)
right_indexer = np.empty(left_size, dtype=np.intp)
@@ -894,55 +885,3 @@ cdef _choose_smaller_timestamp(
left_indexer[i] = bli[i]
return left_indexer, right_indexer
-
-
-# ----------------------------------------------------------------------
-# asof_join
-# ----------------------------------------------------------------------
-
-def asof_join_backward(numeric_t[:] left_values,
- numeric_t[:] right_values,
- bint allow_exact_matches=True,
- tolerance=None):
-
- return asof_join_backward_on_X_by_Y(
- left_values,
- right_values,
- None,
- None,
- allow_exact_matches=allow_exact_matches,
- tolerance=tolerance,
- use_hashtable=False,
- )
-
-
-def asof_join_forward(numeric_t[:] left_values,
- numeric_t[:] right_values,
- bint allow_exact_matches=True,
- tolerance=None):
- return asof_join_forward_on_X_by_Y(
- left_values,
- right_values,
- None,
- None,
- allow_exact_matches=allow_exact_matches,
- tolerance=tolerance,
- use_hashtable=False,
- )
-
-
-def asof_join_nearest(numeric_t[:] left_values,
- numeric_t[:] right_values,
- bint allow_exact_matches=True,
- tolerance=None):
-
- cdef:
- ndarray[intp_t] bli, bri, fli, fri
-
- # search both forward and backward
- bli, bri = asof_join_backward(left_values, right_values,
- allow_exact_matches, tolerance)
- fli, fri = asof_join_forward(left_values, right_values,
- allow_exact_matches, tolerance)
-
- return _choose_smaller_timestamp(left_values, right_values, bli, bri, fli, fri)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index fbcf8a88d2fee..4227d43c459d0 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1691,11 +1691,6 @@ def get_result(self) -> DataFrame:
return result
-def _asof_function(direction: str):
- name = f"asof_join_{direction}"
- return getattr(libjoin, name, None)
-
-
def _asof_by_function(direction: str):
name = f"asof_join_{direction}_on_X_by_Y"
return getattr(libjoin, name, None)
@@ -2017,8 +2012,16 @@ def injection(obj):
)
else:
# choose appropriate function by type
- func = _asof_function(self.direction)
- return func(left_values, right_values, self.allow_exact_matches, tolerance)
+ func = _asof_by_function(self.direction)
+ return func(
+ left_values,
+ right_values,
+ None,
+ None,
+ self.allow_exact_matches,
+ tolerance,
+ False,
+ )
def _get_multiindex_indexer(
| Several functions in the join cython are module are basically just calling others, and can probably be removed. This wouldn't
be a big deal inside python, but in this case it cuts down considerably on the amount of generated cython code.
The tests pass for me locally when I do this and there seems to be significant space savings. | https://api.github.com/repos/pandas-dev/pandas/pulls/46943 | 2022-05-04T20:11:58Z | 2022-05-06T21:29:50Z | 2022-05-06T21:29:50Z | 2022-09-03T16:04:41Z |
TYP: narrow type bounds on extract_array | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 393eb2997f6f0..888e943488953 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -453,30 +453,34 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> npt.NDArray[np.bool_]:
else:
values = extract_array(values, extract_numpy=True, extract_range=True)
- comps = _ensure_arraylike(comps)
- comps = extract_array(comps, extract_numpy=True)
- if not isinstance(comps, np.ndarray):
+ comps_array = _ensure_arraylike(comps)
+ comps_array = extract_array(comps_array, extract_numpy=True)
+ if not isinstance(comps_array, np.ndarray):
# i.e. Extension Array
- return comps.isin(values)
+ return comps_array.isin(values)
- elif needs_i8_conversion(comps.dtype):
+ elif needs_i8_conversion(comps_array.dtype):
# Dispatch to DatetimeLikeArrayMixin.isin
- return pd_array(comps).isin(values)
- elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype):
- # e.g. comps are integers and values are datetime64s
- return np.zeros(comps.shape, dtype=bool)
+ return pd_array(comps_array).isin(values)
+ elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps_array.dtype):
+ # e.g. comps_array are integers and values are datetime64s
+ return np.zeros(comps_array.shape, dtype=bool)
# TODO: not quite right ... Sparse/Categorical
elif needs_i8_conversion(values.dtype):
- return isin(comps, values.astype(object))
+ return isin(comps_array, values.astype(object))
elif isinstance(values.dtype, ExtensionDtype):
- return isin(np.asarray(comps), np.asarray(values))
+ return isin(np.asarray(comps_array), np.asarray(values))
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
# Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
# in1d is faster for small sizes
- if len(comps) > 1_000_000 and len(values) <= 26 and not is_object_dtype(comps):
+ if (
+ len(comps_array) > 1_000_000
+ and len(values) <= 26
+ and not is_object_dtype(comps_array)
+ ):
# If the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
if isna(values).any():
@@ -488,12 +492,12 @@ def f(c, v):
f = np.in1d
else:
- common = np.find_common_type([values.dtype, comps.dtype], [])
+ common = np.find_common_type([values.dtype, comps_array.dtype], [])
values = values.astype(common, copy=False)
- comps = comps.astype(common, copy=False)
+ comps_array = comps_array.astype(common, copy=False)
f = htable.ismember
- return f(comps, values)
+ return f(comps_array, values)
def factorize_array(
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 01a04b7aa63d9..4c8d3db7b4672 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -447,9 +447,7 @@ def __init__(
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
- # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
- # attribute "_codes"
- old_codes = extract_array(values)._codes # type: ignore[union-attr]
+ old_codes = extract_array(values)._codes
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories, copy=copy
)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 9ced8f225c3a8..1930580b63b79 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1235,13 +1235,7 @@ def _addsub_object_array(self, other: np.ndarray, op):
res_values = op(self.astype("O"), np.asarray(other))
result = pd_array(res_values.ravel())
- # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no attribute
- # "reshape"
- result = extract_array(
- result, extract_numpy=True
- ).reshape( # type: ignore[union-attr]
- self.shape
- )
+ result = extract_array(result, extract_numpy=True).reshape(self.shape)
return result
def _time_shift(
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 17cdf6665aa99..434302b39fef9 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -9,8 +9,11 @@
from typing import (
TYPE_CHECKING,
Any,
+ Optional,
Sequence,
+ Union,
cast,
+ overload,
)
import warnings
@@ -18,11 +21,13 @@
import numpy.ma as ma
from pandas._libs import lib
+from pandas._libs.tslibs.period import Period
from pandas._typing import (
AnyArrayLike,
ArrayLike,
Dtype,
DtypeObj,
+ T,
)
from pandas.errors import IntCastingNaNError
from pandas.util._exceptions import find_stack_level
@@ -329,7 +334,8 @@ def array(
if dtype is None:
inferred_dtype = lib.infer_dtype(data, skipna=True)
if inferred_dtype == "period":
- return PeriodArray._from_sequence(data, copy=copy)
+ period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
+ return PeriodArray._from_sequence(period_data, copy=copy)
elif inferred_dtype == "interval":
return IntervalArray(data, copy=copy)
@@ -376,9 +382,23 @@ def array(
return PandasArray._from_sequence(data, dtype=dtype, copy=copy)
+@overload
def extract_array(
- obj: object, extract_numpy: bool = False, extract_range: bool = False
-) -> Any | ArrayLike:
+ obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
+) -> ArrayLike:
+ ...
+
+
+@overload
+def extract_array(
+ obj: T, extract_numpy: bool = ..., extract_range: bool = ...
+) -> T | ArrayLike:
+ ...
+
+
+def extract_array(
+ obj: T, extract_numpy: bool = False, extract_range: bool = False
+) -> T | ArrayLike:
"""
Extract the ndarray or ExtensionArray from a Series or Index.
@@ -425,12 +445,15 @@ def extract_array(
if isinstance(obj, ABCRangeIndex):
if extract_range:
return obj._values
- return obj
+ # https://github.com/python/mypy/issues/1081
+ # error: Incompatible return value type (got "RangeIndex", expected
+ # "Union[T, Union[ExtensionArray, ndarray[Any, Any]]]")
+ return obj # type: ignore[return-value]
- obj = obj._values
+ return obj._values
elif extract_numpy and isinstance(obj, ABCPandasArray):
- obj = obj.to_numpy()
+ return obj.to_numpy()
return obj
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 8451dcb6e412a..7a5db56cb48fe 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -913,12 +913,7 @@ def _list_of_series_to_arrays(
values = extract_array(s, extract_numpy=True)
aligned_values.append(algorithms.take_nd(values, indexer))
- # error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]";
- # expected "Sequence[Union[Union[int, float, complex, str, bytes, generic],
- # Sequence[Union[int, float, complex, str, bytes, generic]],
- # Sequence[Sequence[Any]], _SupportsArray]]"
- content = np.vstack(aligned_values) # type: ignore[arg-type]
-
+ content = np.vstack(aligned_values)
return content, columns
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index ef25224e5a847..3019aa1fc2dc7 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1641,9 +1641,7 @@ def _format_strings(self) -> list[str]:
formatter = self.formatter
if formatter is None:
- # error: Item "ndarray" of "Union[Any, Union[ExtensionArray, ndarray]]" has
- # no attribute "_formatter"
- formatter = values._formatter(boxed=True) # type: ignore[union-attr]
+ formatter = values._formatter(boxed=True)
if isinstance(values, Categorical):
# Categorical is special for now, so that we can preserve tzinfo
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fc9671c2fc973..c20ce0c847b61 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -38,6 +38,7 @@
)
from pandas._libs.tslibs import timezones
from pandas._typing import (
+ AnyArrayLike,
ArrayLike,
DtypeArg,
Shape,
@@ -3042,7 +3043,7 @@ def write_array_empty(self, key: str, value: ArrayLike):
node._v_attrs.shape = value.shape
def write_array(
- self, key: str, obj: DataFrame | Series, items: Index | None = None
+ self, key: str, obj: AnyArrayLike, items: Index | None = None
) -> None:
# TODO: we only have a few tests that get here, the only EA
# that gets passed is DatetimeArray, and we never have
| xref https://github.com/pandas-dev/pandas/issues/37715
Narrowing the type bound allows resolving some ignored mypy errors.
The other modified code is needed because `extract_array` no longer returns `Any`, causing more strict type checking in the calling methods. | https://api.github.com/repos/pandas-dev/pandas/pulls/46942 | 2022-05-04T20:09:51Z | 2022-05-25T22:28:01Z | 2022-05-25T22:28:01Z | 2022-08-02T19:28:09Z |
PERF: Improve SeriesGroupBy.value_counts performances with categorical values. | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index cf6f3f92068e8..1152c2253c42d 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -677,6 +677,15 @@ def time_groupby_extra_cat_sort(self):
def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby("a", sort=False)["b"].count()
+ def time_series_groupby_value_counts_many_categories(self):
+ df = self.df_extra_cat[0:10**4]
+ df.groupby("b")["a"].value_counts()
+
+ def time_series_groupby_value_counts_few_categories(self):
+ df = self.df_extra_cat[0:10**4].copy()
+ df["a"] = df["a"].cat.remove_unused_categories()
+ df.groupby("b")["a"].value_counts()
+
class Datelike:
# GH 14338
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 931d18dc349f3..cd2126826866f 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -468,6 +468,7 @@ Performance improvements
- Performance improvement when setting values in a pyarrow backed string array (:issue:`46400`)
- Performance improvement in :func:`factorize` (:issue:`46109`)
- Performance improvement in :class:`DataFrame` and :class:`Series` constructors for extension dtype scalars (:issue:`45854`)
+- Performance improvement in :meth:`SeriesGroupBy.value_counts` with categorical values. (:issue:`46202`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.bug_fixes:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 245e33fb1a23b..5b79e37586b42 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -609,12 +609,35 @@ def value_counts(
names = self.grouper.names + [self.obj.name]
- if is_categorical_dtype(val.dtype) or (
- bins is not None and not np.iterable(bins)
- ):
+ if is_categorical_dtype(val.dtype):
+ df = self.obj.to_frame()
+ df.columns = Index([self.obj.name])
+ # GH38672 relates to categorical dtype
+ groupby = DataFrameGroupBy(
+ df,
+ self.grouper,
+ axis=self.axis,
+ level=self.level,
+ grouper=self.grouper,
+ exclusions=self.exclusions,
+ as_index=self.as_index,
+ sort=self.sort,
+ group_keys=self.group_keys,
+ squeeze=self.squeeze,
+ observed=self.observed,
+ mutated=self.mutated,
+ dropna=self.dropna,
+ )
+ ser = groupby.value_counts(
+ normalize=normalize, sort=sort, ascending=ascending
+ )
+ ser.name = self.obj.name
+ return ser
+
+ if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
- # GH38672 relates to categorical dtype
+
ser = self.apply(
Series.value_counts,
normalize=normalize,
| - [X] closes #46202
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
The performance issue comes from the fact the categorical path relies on `GroupBy.apply`. Actually, `SeriesGroupby.value_counts` and `DataFrameGroupBy.value_counts` have different implementations, and I feel they should both rely on a single one. Typically, the Series implementation looks more complicated than the DataFrame one (which relies on `GroupBy.size`), this is why this change creates a `DataFrameGroupBy` object to rely on the DataFrame implementation. Ideally, I think the entire Series implementation could rely on the DataFrame one (not only the categorical path). Also, I have a small doubt whether creating a new `DataFrameGroupBy` object out of a `SeriesGroupBy` one is a good practice or not, so if there is a better way to do here feel free to suggest it.
Note I did not add a test in this PR as it fixes performance only, but I manually tested and confirmed the performance improvement. | https://api.github.com/repos/pandas-dev/pandas/pulls/46940 | 2022-05-04T13:44:04Z | 2022-08-01T20:24:35Z | null | 2022-08-01T20:24:41Z |
ENH: validates boolean kwargs in DataFrame and Series methods | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 931d18dc349f3..7828e05892887 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -125,6 +125,7 @@ Other enhancements
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
- Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`)
+- Added validation of boolean kwargs in string series and DataFrame methods, along with tests (:issue:`16714`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.notable_bug_fixes:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ef5e6dd1d6757..073df7b512b88 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -95,6 +95,7 @@
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
+ validate_bool_kwargs_from_keywords,
)
from pandas.core.dtypes.cast import (
@@ -816,6 +817,7 @@ def __init__(
NDFrame.__init__(self, mgr)
# ----------------------------------------------------------------------
+ @validate_bool_kwargs_from_keywords('nan_as_null', 'allow_copy')
def __dataframe__(
self, nan_as_null: bool = False, allow_copy: bool = True
) -> DataFrameXchg:
@@ -986,6 +988,7 @@ def _repr_fits_vertical_(self) -> bool:
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
+ @validate_bool_kwargs_from_keywords('ignore_width')
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
@@ -1165,6 +1168,7 @@ def to_string(
"references the column, while the value defines the space to use.",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
+ @validate_bool_kwargs_from_keywords('index', 'index_names', 'show_dimensions')
def to_string(
self,
buf: FilePath | WriteBuffer[str] | None = None,
@@ -1379,7 +1383,7 @@ def iterrows(self) -> Iterable[tuple[Hashable, Series]]:
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k).__finalize__(self)
yield k, s
-
+ @validate_bool_kwargs_from_keywords('index')
def itertuples(
self, index: bool = True, name: str | None = "Pandas"
) -> Iterable[tuple[Any, ...]]:
@@ -1740,6 +1744,7 @@ def create_index(indexlist, namelist):
columns = create_index(data["columns"], data["column_names"])
return cls(realdata, index=index, columns=columns, dtype=dtype)
+ @validate_bool_kwargs_from_keywords('copy')
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
@@ -2011,6 +2016,7 @@ def to_dict(self, orient: str = "dict", into=dict):
else:
raise ValueError(f"orient '{orient}' not understood")
+ @validate_bool_kwargs_from_keywords('reauth', 'auth_local_webserver', 'progress_bar')
def to_gbq(
self,
destination_table: str,
@@ -2496,6 +2502,7 @@ def to_records(
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
+ @validate_bool_kwargs_from_keywords('verify_integrity')
def _from_arrays(
cls,
arrays,
@@ -2550,6 +2557,7 @@ def _from_arrays(
compression_options=_shared_docs["compression_options"] % "path",
)
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
+ @validate_bool_kwargs_from_keywords('write_index')
def to_stata(
self,
path: FilePath | WriteBuffer[bytes],
@@ -2911,6 +2919,7 @@ def to_parquet(
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
+ @validate_bool_kwargs_from_keywords('index', 'index_names', 'bold_rows', 'escape', 'notebook', 'render_links')
def to_html(
self,
buf: FilePath | WriteBuffer[str] | None = None,
@@ -3001,6 +3010,7 @@ def to_html(
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "path_or_buffer",
)
+ @validate_bool_kwargs_from_keywords('index')
def to_xml(
self,
path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
@@ -3233,6 +3243,7 @@ def info(
show_counts=show_counts,
)
+ @validate_bool_kwargs_from_keywords('index', 'deep')
def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
"""
Return the memory usage of each column in bytes.
@@ -3333,6 +3344,7 @@ def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
result = index_memory_usage._append(result)
return result
+ @validate_bool_kwargs_from_keywords('copy')
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
@@ -3652,6 +3664,7 @@ def _getitem_multilevel(self, key):
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
+ @validate_bool_kwargs_from_keywords('takeable')
def _get_value(self, index, col, takeable: bool = False) -> Scalar:
"""
Quickly retrieve single value at passed column and index.
@@ -4004,6 +4017,7 @@ def _reset_cacher(self) -> None:
# no-op for DataFrame
pass
+ @validate_bool_kwargs_from_keywords('inplace')
def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None:
"""
The object has called back to us saying maybe it has changed.
@@ -4021,6 +4035,7 @@ def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None:
# ----------------------------------------------------------------------
# Unsorted
+ @validate_bool_kwargs_from_keywords('inplace')
def query(self, expr: str, inplace: bool = False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
@@ -4186,6 +4201,7 @@ def query(self, expr: str, inplace: bool = False, **kwargs):
else:
return result
+ @validate_bool_kwargs_from_keywords('inplace')
def eval(self, expr: str, inplace: bool = False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
@@ -4758,6 +4774,7 @@ def _reindex_multi(
)
@doc(NDFrame.align, **_shared_doc_kwargs)
+ @validate_bool_kwargs_from_keywords('copy')
def align(
self,
other,
@@ -4844,6 +4861,7 @@ def set_axis(
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
+ @validate_bool_kwargs_from_keywords('inplace')
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@@ -4913,6 +4931,7 @@ def drop(
# error: Signature of "drop" incompatible with supertype "NDFrame"
# github.com/python/mypy/issues/12387
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
+ @validate_bool_kwargs_from_keywords('inplace')
def drop( # type: ignore[override]
self,
labels: Hashable | list[Hashable] = None,
@@ -5115,6 +5134,7 @@ def rename(
) -> DataFrame | None:
...
+ @validate_bool_kwargs_from_keywords('inplace', 'copy')
def rename(
self,
mapper: Renamer | None = None,
@@ -5362,6 +5382,7 @@ def fillna(
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
@doc(NDFrame.fillna, **_shared_doc_kwargs)
+ @validate_bool_kwargs_from_keywords('inplace')
def fillna(
self,
value: object | ArrayLike | None = None,
@@ -5423,6 +5444,7 @@ def pop(self, item: Hashable) -> Series:
"""
return super().pop(item=item)
+ @validate_bool_kwargs_from_keywords('inplace')
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
@@ -5559,6 +5581,7 @@ def shift(
)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "keys"])
+ @validate_bool_kwargs_from_keywords('inplace', 'drop', 'append', 'verify_integrity')
def set_index(
self,
keys,
@@ -5838,6 +5861,7 @@ def reset_index(
...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
+ @validate_bool_kwargs_from_keywords('inplace', 'drop')
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None = None,
@@ -6110,6 +6134,7 @@ def notnull(self) -> DataFrame:
return ~self.isna()
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace')
def dropna(
self,
axis: Axis = 0,
@@ -6273,6 +6298,7 @@ def dropna(
return result
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "subset"])
+ @validate_bool_kwargs_from_keywords('inplace', 'ignore_index')
def drop_duplicates(
self,
subset: Hashable | Sequence[Hashable] | None = None,
@@ -6519,6 +6545,7 @@ def f(vals) -> tuple[np.ndarray, int]:
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "by"])
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
+ @validate_bool_kwargs_from_keywords('inplace', 'ignore_index')
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
@@ -6642,6 +6669,7 @@ def sort_index(
# error: Signature of "sort_index" incompatible with supertype "NDFrame"
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace', 'sort_remaining', 'ignore_index')
def sort_index( # type: ignore[override]
self,
axis: Axis = 0,
@@ -6755,6 +6783,7 @@ def sort_index( # type: ignore[override]
key=key,
)
+ @validate_bool_kwargs_from_keywords('normalize', 'sort', 'ascending', 'dropna')
def value_counts(
self,
subset: Sequence[Hashable] | None = None,
@@ -7482,6 +7511,7 @@ def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]:
""",
klass=_shared_doc_kwargs["klass"],
)
+ @validate_bool_kwargs_from_keywords('keep_shape', 'keep_equal')
def compare(
self,
other: DataFrame,
@@ -7496,6 +7526,7 @@ def compare(
keep_equal=keep_equal,
)
+ @validate_bool_kwargs_from_keywords('overwrite')
def combine(
self, other: DataFrame, func, fill_value=None, overwrite: bool = True
) -> DataFrame:
@@ -7741,6 +7772,7 @@ def combiner(x, y):
return combined
+ @validate_bool_kwargs_from_keywords('overwrite')
def update(
self,
other,
@@ -8000,6 +8032,7 @@ def update(
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
+ @validate_bool_kwargs_from_keywords('as_index', 'sort', 'observed', 'dropna')
def groupby(
self,
by=None,
@@ -8373,6 +8406,7 @@ def pivot_table(
sort=sort,
)
+ @validate_bool_kwargs_from_keywords('dropna')
def stack(self, level: Level = -1, dropna: bool = True):
"""
Stack the prescribed level(s) from columns to index.
@@ -8548,6 +8582,7 @@ def stack(self, level: Level = -1, dropna: bool = True):
return result.__finalize__(self, method="stack")
+ @validate_bool_kwargs_from_keywords('ignore_index')
def explode(
self,
column: IndexLabel,
@@ -9003,6 +9038,7 @@ def transform(
assert isinstance(result, DataFrame)
return result
+ @validate_bool_kwargs_from_keywords('raw')
def apply(
self,
func: AggFuncType,
@@ -9250,6 +9286,7 @@ def infer(x):
# ----------------------------------------------------------------------
# Merging / joining methods
+ @validate_bool_kwargs_from_keywords('ignore_index', 'verify_integrity', 'sort')
def append(
self,
other,
@@ -9362,6 +9399,7 @@ def append(
return self._append(other, ignore_index, verify_integrity, sort)
+ @validate_bool_kwargs_from_keywords('ignore_index', 'verify_integrity', 'sort')
def _append(
self,
other,
@@ -9421,6 +9459,7 @@ def _append(
result = result.reindex(combined_columns, axis=1)
return result.__finalize__(self, method="append")
+ @validate_bool_kwargs_from_keywords('sort')
def join(
self,
other: DataFrame | Series,
@@ -9594,6 +9633,7 @@ def join(
validate=validate,
)
+ @validate_bool_kwargs_from_keywords('sort')
def _join_compat(
self,
other: DataFrame | Series,
@@ -9677,6 +9717,7 @@ def _join_compat(
@Substitution("")
@Appender(_merge_doc, indents=2)
+ @validate_bool_kwargs_from_keywords('left_index', 'right_index', 'copy', 'indicator', 'sort')
def merge(
self,
right: DataFrame | Series,
@@ -9829,6 +9870,7 @@ def _series_round(ser: Series, decimals: int):
# ----------------------------------------------------------------------
# Statistical methods, etc.
+ @validate_bool_kwargs_from_keywords('numeric_only')
def corr(
self,
method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
@@ -9942,6 +9984,7 @@ def corr(
return self._constructor(correl, index=idx, columns=cols)
+ @validate_bool_kwargs_from_keywords('numeric_only')
def cov(
self,
min_periods: int | None = None,
@@ -10071,6 +10114,7 @@ def cov(
return self._constructor(base_cov, index=idx, columns=cols)
+ @validate_bool_kwargs_from_keywords('numeric_only')
def corrwith(
self,
other,
@@ -10235,6 +10279,7 @@ def c(x):
# ----------------------------------------------------------------------
# ndarray-like stats methods
+ @validate_bool_kwargs_from_keywords('numeric_only')
def count(
self, axis: Axis = 0, level: Level | None = None, numeric_only: bool = False
):
@@ -10340,6 +10385,7 @@ def count(
return result.astype("int64").__finalize__(self, method="count")
+ @validate_bool_kwargs_from_keywords('numeric_only')
def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False):
if numeric_only:
frame = self._get_numeric_data()
@@ -10385,6 +10431,7 @@ def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False):
return result
+ @validate_bool_kwargs_from_keywords('numeric_only')
def _reduce(
self,
op,
@@ -10537,6 +10584,7 @@ def _get_data() -> DataFrame:
result = self._constructor_sliced(result, index=labels)
return result
+ @validate_bool_kwargs_from_keywords('skipna')
def _reduce_axis1(self, name: str, func, skipna: bool) -> Series:
"""
Special case for _reduce to try to avoid a potentially-expensive transpose.
@@ -10565,6 +10613,7 @@ def _reduce_axis1(self, name: str, func, skipna: bool) -> Series:
res_ser = self._constructor_sliced(result, index=self.index)
return res_ser
+ @validate_bool_kwargs_from_keywords('dropna')
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
"""
Count number of distinct elements in specified axis.
@@ -10606,6 +10655,7 @@ def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
return self.apply(Series.nunique, axis=axis, dropna=dropna)
@doc(_shared_docs["idxmin"], numeric_only_default="False")
+ @validate_bool_kwargs_from_keywords('skipna', 'numeric_only')
def idxmin(
self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False
) -> Series:
@@ -10630,6 +10680,7 @@ def idxmin(
return data._constructor_sliced(result, index=data._get_agg_axis(axis))
@doc(_shared_docs["idxmax"], numeric_only_default="False")
+ @validate_bool_kwargs_from_keywords('skipna', 'numeric_only')
def idxmax(
self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False
) -> Series:
@@ -10665,6 +10716,7 @@ def _get_agg_axis(self, axis_num: int) -> Index:
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
+ @validate_bool_kwargs_from_keywords('skipna', 'dropna')
def mode(
self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True
) -> DataFrame:
@@ -10884,6 +10936,7 @@ def quantile(
return result.__finalize__(self, method="quantile")
@doc(NDFrame.asfreq, **_shared_doc_kwargs)
+ @validate_bool_kwargs_from_keywords('normalize')
def asfreq(
self,
freq: Frequency,
@@ -10933,6 +10986,7 @@ def resample(
group_keys=group_keys,
)
+ @validate_bool_kwargs_from_keywords('copy')
def to_timestamp(
self,
freq: Frequency | None = None,
@@ -10971,6 +11025,7 @@ def to_timestamp(
setattr(new_obj, axis_name, new_ax)
return new_obj
+ @validate_bool_kwargs_from_keywords('copy')
def to_period(
self, freq: Frequency | None = None, axis: Axis = 0, copy: bool = True
) -> DataFrame:
@@ -11170,6 +11225,7 @@ def _AXIS_NAMES(self) -> dict[int, str]:
# ----------------------------------------------------------------------
# Internal Interface Methods
+ @validate_bool_kwargs_from_keywords('copy')
def _to_dict_of_blocks(self, copy: bool = True):
"""
Return a dict of dtype -> Constructor Types that
@@ -11264,6 +11320,7 @@ def values(self) -> np.ndarray:
return self._mgr.as_array()
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace')
def ffill(
self: DataFrame,
axis: None | Axis = None,
@@ -11274,6 +11331,7 @@ def ffill(
return super().ffill(axis, inplace, limit, downcast)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace')
def bfill(
self: DataFrame,
axis: None | Axis = None,
@@ -11286,6 +11344,7 @@ def bfill(
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "lower", "upper"]
)
+ @validate_bool_kwargs_from_keywords('inplace')
def clip(
self: DataFrame,
lower=None,
@@ -11298,6 +11357,7 @@ def clip(
return super().clip(lower, upper, axis, inplace, *args, **kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"])
+ @validate_bool_kwargs_from_keywords('inplace')
def interpolate(
self: DataFrame,
method: str = "linear",
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1d3509cac0edd..b84b195ddb2b1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -64,6 +64,7 @@
validate_ascending,
validate_bool_kwarg,
validate_percentile,
+ validate_bool_kwargs_from_keywords,
)
from pandas.core.dtypes.cast import (
@@ -330,6 +331,7 @@ class Series(base.IndexOpsMixin, NDFrame):
# ----------------------------------------------------------------------
# Constructors
+ @validate_bool_kwargs_from_keywords('copy', 'fastpath')
def __init__(
self,
data=None,
@@ -1055,6 +1057,7 @@ def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series:
new_mgr = self._mgr.getitem_mgr(indexer)
return self._constructor(new_mgr).__finalize__(self)
+ @validate_bool_kwargs_from_keywords('takeable')
def _get_value(self, label, takeable: bool = False):
"""
Quickly retrieve single value at passed index label.
@@ -1201,6 +1204,7 @@ def _set_values(self, key, value) -> None:
self._mgr = self._mgr.setitem(indexer=key, value=value)
self._maybe_update_cacher()
+ @validate_bool_kwargs_from_keywords('takeable')
def _set_value(self, label, value, takeable: bool = False):
"""
Quickly set single value at passed label.
@@ -1272,6 +1276,7 @@ def _check_is_chained_assignment_possible(self) -> bool:
return True
return super()._check_is_chained_assignment_possible()
+ @validate_bool_kwargs_from_keywords('clear', 'verify_is_copy', 'inplace')
def _maybe_update_cacher(
self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False
) -> None:
@@ -1368,6 +1373,7 @@ def repeat(self, repeats, axis=None) -> Series:
)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
+ @validate_bool_kwargs_from_keywords('allow_duplicates')
def reset_index(
self,
level=None,
@@ -1639,6 +1645,7 @@ def to_string(
+----+----------+"""
),
)
+ @validate_bool_kwargs_from_keywords('index')
def to_markdown(
self,
buf: IO[str] | None = None,
@@ -1824,6 +1831,7 @@ def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
df = self._constructor_expanddim(mgr)
return df.__finalize__(self, method="to_frame")
+ @validate_bool_kwargs_from_keywords('inplace')
def _set_name(self, name, inplace=False) -> Series:
"""
Set the Series name.
@@ -1921,6 +1929,7 @@ def _set_name(self, name, inplace=False) -> Series:
Name: Max Speed, dtype: float64
"""
)
+ @validate_bool_kwargs_from_keywords('as_index', 'sort', 'observed', 'dropna')
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
@@ -2032,6 +2041,7 @@ def count(self, level=None):
self, method="count"
)
+ @validate_bool_kwargs_from_keywords('dropna')
def mode(self, dropna: bool = True) -> Series:
"""
Return the mode(s) of the Series.
@@ -2890,6 +2900,7 @@ def searchsorted( # type: ignore[override]
# -------------------------------------------------------------------
# Combination
+ @validate_bool_kwargs_from_keywords('ignore_index', 'verify_integrity')
def append(
self, to_append, ignore_index: bool = False, verify_integrity: bool = False
):
@@ -2976,6 +2987,7 @@ def append(
return self._append(to_append, ignore_index, verify_integrity)
+ @validate_bool_kwargs_from_keywords('ignore_index', 'verify_integrity')
def _append(
self, to_append, ignore_index: bool = False, verify_integrity: bool = False
):
@@ -3129,6 +3141,7 @@ def _construct_result(
""",
klass=_shared_doc_kwargs["klass"],
)
+ @validate_bool_kwargs_from_keywords('keep_shape', 'keep_equal')
def compare(
self,
other: Series,
@@ -3371,6 +3384,7 @@ def update(self, other) -> None:
# Reindexing, sorting
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace', 'ignore_index')
def sort_values(
self,
axis=0,
@@ -3630,6 +3644,7 @@ def sort_index(
# error: Signature of "sort_index" incompatible with supertype "NDFrame"
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace', 'sort_remaining', 'ignore_index')
def sort_index( # type: ignore[override]
self,
axis: Axis = 0,
@@ -4132,6 +4147,7 @@ def reorder_levels(self, order) -> Series:
result.index = result.index.reorder_levels(order)
return result
+ @validate_bool_kwargs_from_keywords('ignore_index')
def explode(self, ignore_index: bool = False) -> Series:
"""
Transform each element of a list-like to a row.
@@ -4412,6 +4428,7 @@ def transform(
).transform()
return result
+ @validate_bool_kwargs_from_keywords('convert_dtype')
def apply(
self,
func: AggFuncType,
@@ -4657,6 +4674,7 @@ def rename(
) -> Series | None:
...
+ @validate_bool_kwargs_from_keywords('copy', 'inplace')
def rename(
self,
index: Renamer | Hashable | None = None,
@@ -4789,6 +4807,7 @@ def set_axis(self, labels, axis: Axis = ..., inplace: bool = ...) -> Series | No
see_also_sub="",
)
@Appender(NDFrame.set_axis.__doc__)
+ @validate_bool_kwargs_from_keywords('inplace')
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@@ -4857,6 +4876,7 @@ def drop(
# error: Signature of "drop" incompatible with supertype "NDFrame"
# github.com/python/mypy/issues/12387
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
+ @validate_bool_kwargs_from_keywords('inplace')
def drop( # type: ignore[override]
self,
labels: Hashable | list[Hashable] = None,
@@ -5152,6 +5172,7 @@ def replace(
)
@doc(INFO_DOCSTRING, **series_sub_kwargs)
+ @validate_bool_kwargs_from_keywords('show_counts')
def info(
self,
verbose: bool | None = None,
@@ -5167,6 +5188,7 @@ def info(
show_counts=show_counts,
)
+ @validate_bool_kwargs_from_keywords('inplace')
def _replace_single(self, to_replace, method: str, inplace: bool, limit):
"""
Replaces values in a Series using the fill method specified when no
@@ -5196,6 +5218,7 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> Series:
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
+ @validate_bool_kwargs_from_keywords('index', 'deep')
def memory_usage(self, index: bool = True, deep: bool = False) -> int:
"""
Return the memory usage of the Series.
@@ -5430,6 +5453,7 @@ def between(self, left, right, inclusive="both") -> Series:
# ----------------------------------------------------------------------
# Convert to types that support pd.NA
+ @validate_bool_kwargs_from_keywords('infer_objects', 'convert_string', 'convert_integer', 'convert_boolean', 'convert_floating')
def _convert_dtypes(
self,
infer_objects: bool = True,
@@ -5577,6 +5601,7 @@ def dropna(self, axis=0, inplace=False, how=None):
# error: Cannot determine type of 'asfreq'
@doc(NDFrame.asfreq, **_shared_doc_kwargs) # type: ignore[has-type]
+ @validate_bool_kwargs_from_keywords('normalize')
def asfreq(
self,
freq,
@@ -5684,6 +5709,7 @@ def to_period(self, freq=None, copy=True) -> Series:
)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace')
def ffill(
self: Series,
axis: None | Axis = None,
@@ -5694,6 +5720,7 @@ def ffill(
return super().ffill(axis, inplace, limit, downcast)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ @validate_bool_kwargs_from_keywords('inplace')
def bfill(
self: Series,
axis: None | Axis = None,
@@ -5706,6 +5733,7 @@ def bfill(
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "lower", "upper"]
)
+ @validate_bool_kwargs_from_keywords('inplace')
def clip(
self: Series,
lower=None,
@@ -5718,6 +5746,7 @@ def clip(
return super().clip(lower, upper, axis, inplace, *args, **kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"])
+ @validate_bool_kwargs_from_keywords('inplace')
def interpolate(
self: Series,
method: str = "linear",
diff --git a/pandas/tests/frame/test_validate.py b/pandas/tests/frame/test_validate.py
index e99e0a6863848..02dc0cf8e2605 100644
--- a/pandas/tests/frame/test_validate.py
+++ b/pandas/tests/frame/test_validate.py
@@ -39,3 +39,10 @@ def test_validate_bool_args(self, dataframe, func, inplace):
with pytest.raises(ValueError, match=msg):
getattr(dataframe, func)(**kwargs)
+
+@pytest.mark.parametrize('keyword', ('nan_as_null', 'allow_copy', 'ignore_width', 'index', 'index_names', 'show_dimensions', 'copy', 'inplace', 'reauth', 'auth_local_webserver', 'progress_bar', 'verify_integrity', 'write_index', 'bold_rows', 'escape', 'notebook', 'render_links', 'deep', 'takeable', 'drop', 'append', 'ignore_index', 'sort_remaining', 'normalize', 'ascending', 'dropna', 'keep_shape', 'keep_equal', 'overwrite', 'as_index', 'observed', 'sort', 'raw', 'left_index', 'right_index', 'numeric_only', 'skipna'))
+def test_set_index_validation(dataframe, func, keyword):
+ msg = 'For argument "{}" expected type bool'.format(keyword)
+ kwargs = {keyword: 'hello'}
+ with pytest.raises(ValueError, match=msg):
+ getattr(dataframe, func)(**kwargs)
\ No newline at end of file
diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py
index 3c867f7582b7d..843f460649fb0 100644
--- a/pandas/tests/series/test_validate.py
+++ b/pandas/tests/series/test_validate.py
@@ -24,3 +24,10 @@ def test_validate_bool_args(string_series, func, inplace):
with pytest.raises(ValueError, match=msg):
getattr(string_series, func)(**kwargs)
+
+@pytest.mark.parametrize('keyword', ('copy', 'fastpath', 'takeable', 'clear', 'verify_is_copy', 'inplace', 'allow_duplicates', 'index', 'as_index', 'sort', 'observed', 'dropna', 'ignore_index', 'verify_integrity', 'keep_shape', 'keep_equal', 'inplace', 'sort_remaining' , 'convert_dtype', 'show_counts', 'deep', 'infer_objects', 'convert_string', 'convert_integer', 'convert_boolean', 'convert_floating', 'normalize'))
+def test_set_index_validation(string_series, func, keyword):
+ msg = 'For argument "{}" expected type bool'.format(keyword)
+ kwargs = {keyword: 'hello'}
+ with pytest.raises(ValueError, match=msg):
+ getattr(string_series, func)(**kwargs)
\ No newline at end of file
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 8e3de9404fbee..757c84feedb3f 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -12,6 +12,8 @@
import numpy as np
+import functools
+
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
@@ -518,3 +520,39 @@ def validate_insert_loc(loc: int, length: int) -> int:
if not 0 <= loc <= length:
raise IndexError(f"loc must be an integer between -{length} and {length}")
return loc
+
+def validate_bool_kwargs_from_keywords(*keywords):
+ """
+ Takes keywords and ensures all are type bool, using validate_bool_kwarg
+
+ Example Usage:
+ @validate_bool_kwargs_from_keywords('copy', 'inplace')
+ def method(##that takes bool kwargs## copy: bool = False, inplace: bool = False):
+
+ Used as a decorator above methods. Uses functools.
+
+ validate_bool_kwarg:
+ def validate_bool_kwarg(value, arg_name, none_allowed=True, int_allowed=False):
+ good_value = is_bool(value)
+ if none_allowed:
+ good_value = good_value or value is None
+
+ if int_allowed:
+ good_value = good_value or isinstance(value, int)
+
+ if not good_value:
+ raise ValueError(
+ f'For argument "{arg_name}" expected type bool, received '
+ f"type {type(value).__name__}."
+ )
+ return value
+ """
+ words = set(keywords)
+ def validate_bool_kwargs_from_keywords_inner(func):
+ @functools.wraps(func)
+ def validator(*args, **kwargs):
+ for word in words.intersection(kwargs.keys()):
+ validate_bool_kwarg(kwargs[word], word)
+ return func(*args, **kwargs)
+ return validator
+ return validate_bool_kwargs_from_keywords_inner
\ No newline at end of file
| - [X] closes #16714 (for DataFrame and Series)
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] pre commit code check passed
- [X] Added an entry in the latest whatsnew.
(First commit is an extra frame.py and the second is deleting it- all frame.py's are in the right place)
| https://api.github.com/repos/pandas-dev/pandas/pulls/46938 | 2022-05-04T10:08:07Z | 2022-06-23T22:20:57Z | null | 2022-06-23T22:20:58Z |
TYP: enable reportUnusedImport | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 01ff2e1e1f181..eb5ce71141f46 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -1,4 +1,3 @@
-# flake8: noqa
from __future__ import annotations
__docformat__ = "restructuredtext"
@@ -20,7 +19,7 @@
del _hard_dependencies, _dependency, _missing_dependencies
# numpy compat
-from pandas.compat import is_numpy_dev as _is_numpy_dev
+from pandas.compat import is_numpy_dev as _is_numpy_dev # pyright: ignore # noqa:F401
try:
from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib
@@ -44,7 +43,7 @@
)
# let init-time option registration happen
-import pandas.core.config_init
+import pandas.core.config_init # pyright: ignore # noqa:F401
from pandas.core.api import (
# dtype
@@ -134,7 +133,8 @@
qcut,
)
-from pandas import api, arrays, errors, io, plotting, testing, tseries
+from pandas import api, arrays, errors, io, plotting, tseries
+from pandas import testing # noqa:PDF015
from pandas.util._print_versions import show_versions
from pandas.io.api import (
diff --git a/pandas/_config/__init__.py b/pandas/_config/__init__.py
index 65936a9fcdbf3..929f8a5af6b3f 100644
--- a/pandas/_config/__init__.py
+++ b/pandas/_config/__init__.py
@@ -16,7 +16,7 @@
"options",
]
from pandas._config import config
-from pandas._config import dates # noqa:F401
+from pandas._config import dates # pyright: ignore # noqa:F401
from pandas._config.config import (
describe_option,
get_option,
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 5e90eae27f981..1035fd08a1a36 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -19,7 +19,7 @@
import numpy as np
-from pandas._config.localization import ( # noqa:F401
+from pandas._config.localization import (
can_set_locale,
get_locales,
set_locale,
@@ -49,7 +49,7 @@
Series,
bdate_range,
)
-from pandas._testing._io import ( # noqa:F401
+from pandas._testing._io import (
close,
network,
round_trip_localpath,
@@ -57,16 +57,16 @@
round_trip_pickle,
write_to_compressed,
)
-from pandas._testing._random import ( # noqa:F401
+from pandas._testing._random import (
randbool,
rands,
rands_array,
)
-from pandas._testing._warnings import ( # noqa:F401
+from pandas._testing._warnings import (
assert_produces_warning,
maybe_produces_warning,
)
-from pandas._testing.asserters import ( # noqa:F401
+from pandas._testing.asserters import (
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
@@ -91,11 +91,11 @@
assert_timedelta_array_equal,
raise_assert_detail,
)
-from pandas._testing.compat import ( # noqa:F401
+from pandas._testing.compat import (
get_dtype,
get_obj,
)
-from pandas._testing.contexts import ( # noqa:F401
+from pandas._testing.contexts import (
RNGContext,
decompress_file,
ensure_clean,
@@ -1033,3 +1033,128 @@ def shares_memory(left, right) -> bool:
return shares_memory(arr, right)
raise NotImplementedError(type(left), type(right))
+
+
+__all__ = [
+ "ALL_INT_EA_DTYPES",
+ "ALL_INT_NUMPY_DTYPES",
+ "ALL_NUMPY_DTYPES",
+ "ALL_REAL_NUMPY_DTYPES",
+ "all_timeseries_index_generator",
+ "assert_almost_equal",
+ "assert_attr_equal",
+ "assert_categorical_equal",
+ "assert_class_equal",
+ "assert_contains_all",
+ "assert_copy",
+ "assert_datetime_array_equal",
+ "assert_dict_equal",
+ "assert_equal",
+ "assert_extension_array_equal",
+ "assert_frame_equal",
+ "assert_index_equal",
+ "assert_indexing_slices_equivalent",
+ "assert_interval_array_equal",
+ "assert_is_sorted",
+ "assert_is_valid_plot_return_object",
+ "assert_metadata_equivalent",
+ "assert_numpy_array_equal",
+ "assert_period_array_equal",
+ "assert_produces_warning",
+ "assert_series_equal",
+ "assert_sp_array_equal",
+ "assert_timedelta_array_equal",
+ "at",
+ "BOOL_DTYPES",
+ "box_expected",
+ "BYTES_DTYPES",
+ "can_set_locale",
+ "close",
+ "COMPLEX_DTYPES",
+ "convert_rows_list_to_csv_str",
+ "DATETIME64_DTYPES",
+ "decompress_file",
+ "EMPTY_STRING_PATTERN",
+ "ENDIAN",
+ "ensure_clean",
+ "ensure_clean_dir",
+ "ensure_safe_environment_variables",
+ "equalContents",
+ "external_error_raised",
+ "FLOAT_EA_DTYPES",
+ "FLOAT_NUMPY_DTYPES",
+ "getCols",
+ "get_cython_table_params",
+ "get_dtype",
+ "getitem",
+ "get_locales",
+ "getMixedTypeDict",
+ "get_obj",
+ "get_op_from_name",
+ "getPeriodData",
+ "getSeriesData",
+ "getTimeSeriesData",
+ "iat",
+ "iloc",
+ "index_subclass_makers_generator",
+ "loc",
+ "makeBoolIndex",
+ "makeCategoricalIndex",
+ "makeCustomDataframe",
+ "makeCustomIndex",
+ "makeDataFrame",
+ "makeDateIndex",
+ "makeFloatIndex",
+ "makeFloatSeries",
+ "makeIntervalIndex",
+ "makeIntIndex",
+ "makeMissingDataframe",
+ "makeMixedDataFrame",
+ "makeMultiIndex",
+ "makeNumericIndex",
+ "makeObjectSeries",
+ "makePeriodFrame",
+ "makePeriodIndex",
+ "makePeriodSeries",
+ "make_rand_series",
+ "makeRangeIndex",
+ "makeStringIndex",
+ "makeStringSeries",
+ "makeTimeDataFrame",
+ "makeTimedeltaIndex",
+ "makeTimeSeries",
+ "makeUIntIndex",
+ "maybe_produces_warning",
+ "NARROW_NP_DTYPES",
+ "network",
+ "NP_NAT_OBJECTS",
+ "NULL_OBJECTS",
+ "OBJECT_DTYPES",
+ "raise_assert_detail",
+ "randbool",
+ "rands",
+ "reset_display_options",
+ "reset_testing_mode",
+ "RNGContext",
+ "round_trip_localpath",
+ "round_trip_pathlib",
+ "round_trip_pickle",
+ "setitem",
+ "set_locale",
+ "set_testing_mode",
+ "set_timezone",
+ "shares_memory",
+ "SIGNED_INT_EA_DTYPES",
+ "SIGNED_INT_NUMPY_DTYPES",
+ "STRING_DTYPES",
+ "SubclassedCategorical",
+ "SubclassedDataFrame",
+ "SubclassedSeries",
+ "TIMEDELTA64_DTYPES",
+ "to_array",
+ "UNSIGNED_INT_EA_DTYPES",
+ "UNSIGNED_INT_NUMPY_DTYPES",
+ "use_numexpr",
+ "with_csv_dialect",
+ "write_to_compressed",
+]
diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py
index 67fd722c9198b..22a09ed61d694 100644
--- a/pandas/api/__init__.py
+++ b/pandas/api/__init__.py
@@ -1,7 +1,14 @@
""" public toolkit API """
-from pandas.api import ( # noqa:F401
+from pandas.api import (
exchange,
extensions,
indexers,
types,
)
+
+__all__ = [
+ "exchange",
+ "extensions",
+ "indexers",
+ "types",
+]
diff --git a/pandas/conftest.py b/pandas/conftest.py
index eb17aac99a904..e176707d8a8f1 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1734,7 +1734,7 @@ def spmatrix(request):
params=[
getattr(pd.offsets, o)
for o in pd.offsets.__all__
- if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)
+ if issubclass(getattr(pd.offsets, o), pd.offsets.Tick) and o != "Tick"
]
)
def tick_classes(request):
diff --git a/pandas/core/api.py b/pandas/core/api.py
index cf082d2013d3b..c2bedb032d479 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -1,5 +1,3 @@
-# flake8: noqa:F401
-
from pandas._libs import (
NaT,
Period,
@@ -84,3 +82,65 @@
# DataFrame needs to be imported after NamedAgg to avoid a circular import
from pandas.core.frame import DataFrame # isort:skip
+
+__all__ = [
+ "array",
+ "bdate_range",
+ "BooleanDtype",
+ "Categorical",
+ "CategoricalDtype",
+ "CategoricalIndex",
+ "DataFrame",
+ "DateOffset",
+ "date_range",
+ "DatetimeIndex",
+ "DatetimeTZDtype",
+ "factorize",
+ "Flags",
+ "Float32Dtype",
+ "Float64Dtype",
+ "Float64Index",
+ "Grouper",
+ "Index",
+ "IndexSlice",
+ "Int16Dtype",
+ "Int32Dtype",
+ "Int64Dtype",
+ "Int64Index",
+ "Int8Dtype",
+ "Interval",
+ "IntervalDtype",
+ "IntervalIndex",
+ "interval_range",
+ "isna",
+ "isnull",
+ "MultiIndex",
+ "NA",
+ "NamedAgg",
+ "NaT",
+ "notna",
+ "notnull",
+ "NumericIndex",
+ "Period",
+ "PeriodDtype",
+ "PeriodIndex",
+ "period_range",
+ "RangeIndex",
+ "Series",
+ "set_eng_float_format",
+ "StringDtype",
+ "Timedelta",
+ "TimedeltaIndex",
+ "timedelta_range",
+ "Timestamp",
+ "to_datetime",
+ "to_numeric",
+ "to_timedelta",
+ "UInt16Dtype",
+ "UInt32Dtype",
+ "UInt64Dtype",
+ "UInt64Index",
+ "UInt8Dtype",
+ "unique",
+ "value_counts",
+]
diff --git a/pandas/core/arrays/arrow/__init__.py b/pandas/core/arrays/arrow/__init__.py
index 6bdf29e38ac62..58b268cbdd221 100644
--- a/pandas/core/arrays/arrow/__init__.py
+++ b/pandas/core/arrays/arrow/__init__.py
@@ -1,3 +1,3 @@
-# flake8: noqa: F401
-
from pandas.core.arrays.arrow.array import ArrowExtensionArray
+
+__all__ = ["ArrowExtensionArray"]
diff --git a/pandas/core/arrays/sparse/__init__.py b/pandas/core/arrays/sparse/__init__.py
index 18294ead0329d..56dbc6df54fc9 100644
--- a/pandas/core/arrays/sparse/__init__.py
+++ b/pandas/core/arrays/sparse/__init__.py
@@ -1,5 +1,3 @@
-# flake8: noqa: F401
-
from pandas.core.arrays.sparse.accessor import (
SparseAccessor,
SparseFrameAccessor,
@@ -11,3 +9,13 @@
make_sparse_index,
)
from pandas.core.arrays.sparse.dtype import SparseDtype
+
+__all__ = [
+ "BlockIndex",
+ "IntIndex",
+ "make_sparse_index",
+ "SparseAccessor",
+ "SparseArray",
+ "SparseDtype",
+ "SparseFrameAccessor",
+]
diff --git a/pandas/core/dtypes/api.py b/pandas/core/dtypes/api.py
index bb6bfda183802..e6a59bf12d7cc 100644
--- a/pandas/core/dtypes/api.py
+++ b/pandas/core/dtypes/api.py
@@ -1,5 +1,3 @@
-# flake8: noqa:F401
-
from pandas.core.dtypes.common import (
is_array_like,
is_bool,
@@ -43,3 +41,47 @@
is_unsigned_integer_dtype,
pandas_dtype,
)
+
+__all__ = [
+ "is_array_like",
+ "is_bool",
+ "is_bool_dtype",
+ "is_categorical",
+ "is_categorical_dtype",
+ "is_complex",
+ "is_complex_dtype",
+ "is_datetime64_any_dtype",
+ "is_datetime64_dtype",
+ "is_datetime64_ns_dtype",
+ "is_datetime64tz_dtype",
+ "is_dict_like",
+ "is_dtype_equal",
+ "is_extension_array_dtype",
+ "is_extension_type",
+ "is_file_like",
+ "is_float",
+ "is_float_dtype",
+ "is_hashable",
+ "is_int64_dtype",
+ "is_integer",
+ "is_integer_dtype",
+ "is_interval",
+ "is_interval_dtype",
+ "is_iterator",
+ "is_list_like",
+ "is_named_tuple",
+ "is_number",
+ "is_numeric_dtype",
+ "is_object_dtype",
+ "is_period_dtype",
+ "is_re",
+ "is_re_compilable",
+ "is_scalar",
+ "is_signed_integer_dtype",
+ "is_sparse",
+ "is_string_dtype",
+ "is_timedelta64_dtype",
+ "is_timedelta64_ns_dtype",
+ "is_unsigned_integer_dtype",
+ "pandas_dtype",
+]
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 378f33e2b65ac..c10461b2fc7f8 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -36,7 +36,7 @@
ABCCategorical,
ABCIndex,
)
-from pandas.core.dtypes.inference import ( # noqa:F401
+from pandas.core.dtypes.inference import (
is_array_like,
is_bool,
is_complex,
@@ -1814,3 +1814,70 @@ def is_all_strings(value: ArrayLike) -> bool:
elif isinstance(dtype, CategoricalDtype):
return dtype.categories.inferred_type == "string"
return dtype == "string"
+
+
+__all__ = [
+ "classes",
+ "classes_and_not_datetimelike",
+ "DT64NS_DTYPE",
+ "ensure_float",
+ "ensure_float64",
+ "ensure_python_int",
+ "ensure_str",
+ "get_dtype",
+ "infer_dtype_from_object",
+ "INT64_DTYPE",
+ "is_1d_only_ea_dtype",
+ "is_1d_only_ea_obj",
+ "is_all_strings",
+ "is_any_int_dtype",
+ "is_array_like",
+ "is_bool",
+ "is_bool_dtype",
+ "is_categorical",
+ "is_categorical_dtype",
+ "is_complex",
+ "is_complex_dtype",
+ "is_dataclass",
+ "is_datetime64_any_dtype",
+ "is_datetime64_dtype",
+ "is_datetime64_ns_dtype",
+ "is_datetime64tz_dtype",
+ "is_datetimelike_v_numeric",
+ "is_datetime_or_timedelta_dtype",
+ "is_decimal",
+ "is_dict_like",
+ "is_dtype_equal",
+ "is_ea_or_datetimelike_dtype",
+ "is_extension_array_dtype",
+ "is_extension_type",
+ "is_file_like",
+ "is_float_dtype",
+ "is_int64_dtype",
+ "is_integer_dtype",
+ "is_interval",
+ "is_interval_dtype",
+ "is_iterator",
+ "is_named_tuple",
+ "is_nested_list_like",
+ "is_number",
+ "is_numeric_dtype",
+ "is_numeric_v_string_like",
+ "is_object_dtype",
+ "is_period_dtype",
+ "is_re",
+ "is_re_compilable",
+ "is_scipy_sparse",
+ "is_sequence",
+ "is_signed_integer_dtype",
+ "is_sparse",
+ "is_string_dtype",
+ "is_string_or_object_np_dtype",
+ "is_timedelta64_dtype",
+ "is_timedelta64_ns_dtype",
+ "is_unsigned_integer_dtype",
+ "needs_i8_conversion",
+ "pandas_dtype",
+ "TD64NS_DTYPE",
+ "validate_all_hashable",
+]
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 00ca6f9048a40..19e9c6b27e4e7 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1,3 +1,6 @@
+# pyright: reportUnusedImport = false
+from __future__ import annotations
+
import warnings
from pandas.util._exceptions import find_stack_level
@@ -30,3 +33,5 @@
FutureWarning,
stacklevel=find_stack_level(),
)
+
+__all__: list[str] = []
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index 75715bdc90003..ea69b567611e4 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -23,7 +23,6 @@
__all__ = [
"Block",
- "CategoricalBlock",
"NumericBlock",
"DatetimeTZBlock",
"ExtensionBlock",
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 540a557f7c7cc..e9fefd9268870 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -11,7 +11,7 @@
import numpy as np
-from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401
+from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
from pandas._typing import Level
from pandas.util._decorators import Appender
from pandas.util._exceptions import find_stack_level
@@ -30,7 +30,7 @@
algorithms,
roperator,
)
-from pandas.core.ops.array_ops import ( # noqa:F401
+from pandas.core.ops.array_ops import (
arithmetic_op,
comp_method_OBJECT_ARRAY,
comparison_op,
@@ -38,7 +38,7 @@
logical_op,
maybe_prepare_scalar_for_op,
)
-from pandas.core.ops.common import ( # noqa:F401
+from pandas.core.ops.common import (
get_op_result_name,
unpack_zerodim_and_defer,
)
@@ -47,14 +47,14 @@
_op_descriptions,
make_flex_doc,
)
-from pandas.core.ops.invalid import invalid_comparison # noqa:F401
-from pandas.core.ops.mask_ops import ( # noqa: F401
+from pandas.core.ops.invalid import invalid_comparison
+from pandas.core.ops.mask_ops import (
kleene_and,
kleene_or,
kleene_xor,
)
-from pandas.core.ops.methods import add_flex_arithmetic_methods # noqa:F401
-from pandas.core.roperator import ( # noqa:F401
+from pandas.core.ops.methods import add_flex_arithmetic_methods
+from pandas.core.roperator import (
radd,
rand_,
rdiv,
@@ -473,3 +473,40 @@ def f(self, other, axis=default_axis, level=None):
f.__name__ = op_name
return f
+
+
+__all__ = [
+ "add_flex_arithmetic_methods",
+ "align_method_FRAME",
+ "align_method_SERIES",
+ "ARITHMETIC_BINOPS",
+ "arithmetic_op",
+ "COMPARISON_BINOPS",
+ "comparison_op",
+ "comp_method_OBJECT_ARRAY",
+ "fill_binop",
+ "flex_arith_method_FRAME",
+ "flex_comp_method_FRAME",
+ "flex_method_SERIES",
+ "frame_arith_method_with_reindex",
+ "invalid_comparison",
+ "kleene_and",
+ "kleene_or",
+ "kleene_xor",
+ "logical_op",
+ "maybe_dispatch_ufunc_to_dunder_op",
+ "radd",
+ "rand_",
+ "rdiv",
+ "rdivmod",
+ "rfloordiv",
+ "rmod",
+ "rmul",
+ "ror_",
+ "rpow",
+ "rsub",
+ "rtruediv",
+ "rxor",
+ "should_reindex_frame_op",
+ "unpack_zerodim_and_defer",
+]
diff --git a/pandas/core/reshape/api.py b/pandas/core/reshape/api.py
index f100cca5c7615..b1884c497f0ad 100644
--- a/pandas/core/reshape/api.py
+++ b/pandas/core/reshape/api.py
@@ -1,5 +1,3 @@
-# flake8: noqa:F401
-
from pandas.core.reshape.concat import concat
from pandas.core.reshape.encoding import (
from_dummies,
@@ -24,3 +22,20 @@
cut,
qcut,
)
+
+__all__ = [
+ "concat",
+ "crosstab",
+ "cut",
+ "from_dummies",
+ "get_dummies",
+ "lreshape",
+ "melt",
+ "merge",
+ "merge_asof",
+ "merge_ordered",
+ "pivot",
+ "pivot_table",
+ "qcut",
+ "wide_to_long",
+]
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 7de34c04a31ed..1ec0e6ca83d8f 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -29,7 +29,7 @@
parsing,
timezones,
)
-from pandas._libs.tslibs.parsing import ( # noqa:F401
+from pandas._libs.tslibs.parsing import (
DateParseError,
format_is_iso,
guess_datetime_format,
@@ -1289,3 +1289,11 @@ def to_time(arg, format=None, infer_time_format=False, errors="raise"):
from pandas.core.tools.times import to_time
return to_time(arg, format, infer_time_format, errors)
+
+
+__all__ = [
+ "DateParseError",
+ "should_cache",
+ "to_datetime",
+ "to_time",
+]
diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py
index 8f42cd782c67f..857e12e5467a6 100644
--- a/pandas/core/window/__init__.py
+++ b/pandas/core/window/__init__.py
@@ -1,13 +1,23 @@
-from pandas.core.window.ewm import ( # noqa:F401
+from pandas.core.window.ewm import (
ExponentialMovingWindow,
ExponentialMovingWindowGroupby,
)
-from pandas.core.window.expanding import ( # noqa:F401
+from pandas.core.window.expanding import (
Expanding,
ExpandingGroupby,
)
-from pandas.core.window.rolling import ( # noqa:F401
+from pandas.core.window.rolling import (
Rolling,
RollingGroupby,
Window,
)
+
+__all__ = [
+ "Expanding",
+ "ExpandingGroupby",
+ "ExponentialMovingWindow",
+ "ExponentialMovingWindowGroupby",
+ "Rolling",
+ "RollingGroupby",
+ "Window",
+]
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 6e47c46cc7203..93f07c5d75625 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -108,7 +108,6 @@
)
from pandas.core.generic import NDFrame
from pandas.core.groupby.ops import BaseGrouper
- from pandas.core.internals import Block # noqa:F401
class BaseWindow(SelectionMixin):
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 98a9d2b35f09d..47819ae5fad23 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -5,9 +5,9 @@
import ctypes
-from pandas._config.config import OptionError # noqa:F401
+from pandas._config.config import OptionError
-from pandas._libs.tslibs import ( # noqa:F401
+from pandas._libs.tslibs import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
)
@@ -413,3 +413,35 @@ class CSSWarning(UserWarning):
... .to_excel('styled.xlsx') # doctest: +SKIP
... # CSSWarning: Too many tokens provided to "border" (expected 1-3)
"""
+
+
+__all__ = [
+ "AbstractMethodError",
+ "AccessorRegistrationWarning",
+ "CSSWarning",
+ "DataError",
+ "DtypeWarning",
+ "DuplicateLabelError",
+ "EmptyDataError",
+ "IntCastingNaNError",
+ "InvalidIndexError",
+ "IndexingError",
+ "MergeError",
+ "NullFrequencyError",
+ "NumbaUtilError",
+ "NumExprClobberingError",
+ "OptionError",
+ "OutOfBoundsDatetime",
+ "OutOfBoundsTimedelta",
+ "ParserError",
+ "ParserWarning",
+ "PerformanceWarning",
+ "PyperclipException",
+ "PyperclipWindowsException",
+ "SettingWithCopyError",
+ "SettingWithCopyWarning",
+ "SpecificationError",
+ "UndefinedVariableError",
+ "UnsortedIndexError",
+ "UnsupportedFunctionCall",
+]
diff --git a/pandas/io/api.py b/pandas/io/api.py
index 5926f2166ee9d..4e8b34a61dfc6 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -2,8 +2,6 @@
Data IO api
"""
-# flake8: noqa
-
from pandas.io.clipboards import read_clipboard
from pandas.io.excel import (
ExcelFile,
@@ -38,3 +36,30 @@
)
from pandas.io.stata import read_stata
from pandas.io.xml import read_xml
+
+__all__ = [
+ "ExcelFile",
+ "ExcelWriter",
+ "HDFStore",
+ "read_clipboard",
+ "read_csv",
+ "read_excel",
+ "read_feather",
+ "read_fwf",
+ "read_gbq",
+ "read_hdf",
+ "read_html",
+ "read_json",
+ "read_orc",
+ "read_parquet",
+ "read_pickle",
+ "read_sas",
+ "read_spss",
+ "read_sql",
+ "read_sql_query",
+ "read_sql_table",
+ "read_stata",
+ "read_table",
+ "read_xml",
+ "to_pickle",
+]
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index d28309cda6788..ed0e0a99ec43b 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -151,7 +151,7 @@ def __init__(self) -> None:
import pyarrow.parquet
# import utils to register the pyarrow extension types
- import pandas.core.arrays.arrow._arrow_utils # noqa:F401
+ import pandas.core.arrays.arrow._arrow_utils # pyright: ignore # noqa:F401
self.api = pyarrow
diff --git a/pandas/io/sas/__init__.py b/pandas/io/sas/__init__.py
index 71027fd064f3d..317730745b6e3 100644
--- a/pandas/io/sas/__init__.py
+++ b/pandas/io/sas/__init__.py
@@ -1 +1,3 @@
-from pandas.io.sas.sasreader import read_sas # noqa:F401
+from pandas.io.sas.sasreader import read_sas
+
+__all__ = ["read_sas"]
diff --git a/pandas/tests/tseries/offsets/conftest.py b/pandas/tests/tseries/offsets/conftest.py
index df68c98dca43f..72f5c4a519a3a 100644
--- a/pandas/tests/tseries/offsets/conftest.py
+++ b/pandas/tests/tseries/offsets/conftest.py
@@ -5,7 +5,11 @@
import pandas.tseries.offsets as offsets
-@pytest.fixture(params=[getattr(offsets, o) for o in offsets.__all__])
+@pytest.fixture(
+ params=[
+ getattr(offsets, o) for o in offsets.__all__ if o not in ("Tick", "BaseOffset")
+ ]
+)
def offset_types(request):
"""
Fixture for all the datetime offsets available for a time series.
diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py
index 59666fa0048dd..e274838d45b27 100644
--- a/pandas/tseries/api.py
+++ b/pandas/tseries/api.py
@@ -2,7 +2,7 @@
Timeseries API
"""
-# flake8: noqa:F401
-
from pandas.tseries.frequencies import infer_freq
import pandas.tseries.offsets as offsets
+
+__all__ = ["infer_freq", "offsets"]
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index a4fe2161983b6..b2fbc022b2708 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -22,7 +22,7 @@
build_field_sarray,
month_position_check,
)
-from pandas._libs.tslibs.offsets import ( # noqa:F401
+from pandas._libs.tslibs.offsets import (
BaseOffset,
DateOffset,
Day,
@@ -647,3 +647,14 @@ def _is_monthly(rule: str) -> bool:
def _is_weekly(rule: str) -> bool:
rule = rule.upper()
return rule == "W" or rule.startswith("W-")
+
+
+__all__ = [
+ "Day",
+ "get_offset",
+ "get_period_alias",
+ "infer_freq",
+ "is_subperiod",
+ "is_superperiod",
+ "to_offset",
+]
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 6fd49e2340e30..6426dbcd54489 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -6,7 +6,7 @@
)
import warnings
-from dateutil.relativedelta import ( # noqa:F401
+from dateutil.relativedelta import (
FR,
MO,
SA,
@@ -582,3 +582,27 @@ def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
+
+
+__all__ = [
+ "after_nearest_workday",
+ "before_nearest_workday",
+ "FR",
+ "get_calendar",
+ "HolidayCalendarFactory",
+ "MO",
+ "nearest_workday",
+ "next_monday",
+ "next_monday_or_tuesday",
+ "next_workday",
+ "previous_friday",
+ "previous_workday",
+ "register",
+ "SA",
+ "SU",
+ "sunday_to_monday",
+ "TH",
+ "TU",
+ "WE",
+ "weekend_to_monday",
+]
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index cee99d23f8d90..b995c6ac78b80 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1,4 +1,4 @@
-from pandas._libs.tslibs.offsets import ( # noqa:F401
+from pandas._libs.tslibs.offsets import (
FY5253,
BaseOffset,
BDay,
@@ -45,9 +45,14 @@
__all__ = [
"Day",
+ "BaseOffset",
"BusinessDay",
+ "BusinessMonthBegin",
+ "BusinessMonthEnd",
"BDay",
"CustomBusinessDay",
+ "CustomBusinessMonthBegin",
+ "CustomBusinessMonthEnd",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
@@ -73,6 +78,7 @@
"Week",
"WeekOfMonth",
"Easter",
+ "Tick",
"Hour",
"Minute",
"Second",
diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py
index 7adfca73c2f1e..6e6006dd28165 100644
--- a/pandas/util/__init__.py
+++ b/pandas/util/__init__.py
@@ -1,3 +1,4 @@
+# pyright: reportUnusedImport = false
from pandas.util._decorators import ( # noqa:F401
Appender,
Substitution,
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 978f2982e6d18..cec4ee40a8c7a 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -11,7 +11,7 @@
)
import warnings
-from pandas._libs.properties import cache_readonly # noqa:F401
+from pandas._libs.properties import cache_readonly
from pandas._typing import F
from pandas.util._exceptions import find_stack_level
@@ -498,3 +498,16 @@ def indent(text: str | None, indents: int = 1) -> str:
return ""
jointext = "".join(["\n"] + [" "] * indents)
return jointext.join(text.split("\n"))
+
+
+__all__ = [
+ "Appender",
+ "cache_readonly",
+ "deprecate",
+ "deprecate_kwarg",
+ "deprecate_nonkeyword_arguments",
+ "doc",
+ "future_version_msg",
+ "rewrite_axis_style_signature",
+ "Substitution",
+]
diff --git a/pyproject.toml b/pyproject.toml
index 0e2e41fba461c..6ca37581b03f0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -166,6 +166,7 @@ reportPropertyTypeMismatch = true
reportUntypedClassDecorator = true
reportUntypedFunctionDecorator = true
reportUntypedNamedTuple = true
+reportUnusedImport = true
# disable subset of "basic"
reportGeneralTypeIssues = false
reportMissingModuleSource = false
@@ -176,4 +177,3 @@ reportOptionalOperand = false
reportOptionalSubscript = false
reportPrivateImportUsage = false
reportUnboundVariable = false
-reportUnsupportedDunderAll = false
| pyright's reportUnusedImport checks py and pyi files for unused imports. If an import is explicitly marked as public (re-exported using `as` or in `__all__`) it is "used" (flake8 seems to use the same definition).
Adding unused imports to `__all__` becomes messy when the file does not yet have `__all__`: need to list also all public symbols (constants, classes, functions). | https://api.github.com/repos/pandas-dev/pandas/pulls/46937 | 2022-05-04T03:34:35Z | 2022-07-10T00:11:59Z | 2022-07-10T00:11:59Z | 2022-09-21T15:28:31Z |
Add/reorganize scalar Timedelta tests | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 4eb1494c4d56c..5f4a5d144eb8c 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1,4 +1,5 @@
import collections
+import operator
import warnings
cimport cython
@@ -41,6 +42,7 @@ from pandas._libs.tslibs.nattype cimport (
c_NaT as NaT,
c_nat_strings as nat_strings,
checknull_with_nat,
+ is_td64nat,
)
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
@@ -55,6 +57,7 @@ from pandas._libs.tslibs.np_datetime cimport (
pandas_timedelta_to_timedeltastruct,
pandas_timedeltastruct,
)
+from pandas._libs.util cimport INT64_MAX
from pandas._libs.tslibs.np_datetime import OutOfBoundsTimedelta
@@ -216,13 +219,12 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
+ delta.seconds * 1_000_000
+ delta.microseconds
) * 1000
- except OverflowError as err:
- raise OutOfBoundsTimedelta(*err.args) from err
-
+ except OverflowError as ex:
+ msg = f"{delta} outside allowed range [{NPY_NAT + 1}ns, {INT64_MAX}ns]"
+ raise OutOfBoundsTimedelta(msg) from ex
raise TypeError(type(delta))
-@cython.overflowcheck(True)
cdef object ensure_td64ns(object ts):
"""
Overflow-safe implementation of td64.astype("m8[ns]")
@@ -241,24 +243,20 @@ cdef object ensure_td64ns(object ts):
str unitstr
td64_unit = get_datetime64_unit(ts)
- if (
- td64_unit != NPY_DATETIMEUNIT.NPY_FR_ns
- and td64_unit != NPY_DATETIMEUNIT.NPY_FR_GENERIC
- ):
- unitstr = npy_unit_to_abbrev(td64_unit)
+ if td64_unit == NPY_DATETIMEUNIT.NPY_FR_ns or td64_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC:
+ return ts
- td64_value = get_timedelta64_value(ts)
+ unitstr = npy_unit_to_abbrev(td64_unit)
+ mult = precision_from_unit(unitstr)[0]
- mult = precision_from_unit(unitstr)[0]
+ with cython.overflowcheck(True):
try:
- # NB: cython#1381 this cannot be *=
- td64_value = td64_value * mult
- except OverflowError as err:
- raise OutOfBoundsTimedelta(ts) from err
+ td64_value = get_timedelta64_value(ts) * mult
+ except OverflowError as ex:
+ msg = f"{ts} outside allowed range [{NPY_NAT + 1}ns, {INT64_MAX}ns]"
+ raise OutOfBoundsTimedelta(msg) from ex
- return np.timedelta64(td64_value, "ns")
-
- return ts
+ return np.timedelta64(td64_value, "ns")
cdef convert_to_timedelta64(object ts, str unit):
@@ -674,8 +672,7 @@ cdef bint _validate_ops_compat(other):
def _op_unary_method(func, name):
def f(self):
- new_value = func(self.value)
- return _timedelta_from_value_and_reso(new_value, self._reso)
+ return create_timedelta(func(self.value), "ignore", self._reso)
f.__name__ = name
return f
@@ -724,13 +721,7 @@ def _binary_op_method_timedeltalike(op, name):
if self._reso != other._reso:
raise NotImplementedError
- res = op(self.value, other.value)
- if res == NPY_NAT:
- # e.g. test_implementation_limits
- # TODO: more generally could do an overflowcheck in op?
- return NaT
-
- return _timedelta_from_value_and_reso(res, reso=self._reso)
+ return create_timedelta(op(self.value, other.value), "ignore", self._reso)
f.__name__ = name
return f
@@ -861,7 +852,7 @@ cdef _to_py_int_float(v):
def _timedelta_unpickle(value, reso):
- return _timedelta_from_value_and_reso(value, reso)
+ return create_timedelta(value, "ignore", reso)
cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso):
@@ -892,6 +883,49 @@ cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso):
return td_base
+@cython.overflowcheck(True)
+cdef object create_timedelta(object value, str in_unit, NPY_DATETIMEUNIT out_reso):
+ """
+ Timedelta factory.
+
+ Timedelta.__new__ just does arg validation (at least currently). Also, some internal
+ functions expect to be able to create non-nano reso Timedeltas, but Timedelta.__new__
+ doesn't yet expose that.
+
+ _timedelta_from_value_and_reso does, but only accepts limited args, and doesn't check for overflow.
+ """
+ cdef:
+ int64_t out_value
+
+ if isinstance(value, _Timedelta):
+ return value
+
+ try:
+ # if unit == "ns", no need to create an m8[ns] just to read the (same) value back
+ # if unit == "ignore", assume caller wants to invoke an overflow-safe version of
+ # _timedelta_from_value_and_reso, and that any float rounding is acceptable
+ if (is_integer_object(value) or is_float_object(value)) and (in_unit == "ns" or in_unit == "ignore"):
+ if util.is_nan(value):
+ return NaT
+ out_value = <int64_t>value
+ elif is_timedelta64_object(value):
+ out_value = ensure_td64ns(value).view(np.int64)
+ elif isinstance(value, str):
+ if value.startswith(("P", "-P")):
+ out_value = parse_iso_format_string(value)
+ else:
+ out_value = parse_timedelta_string(value)
+ else:
+ out_value = convert_to_timedelta64(value, in_unit).view(np.int64)
+ except OverflowError as ex:
+ msg = f"{value} outside allowed range [{NPY_NAT + 1}ns, {INT64_MAX}ns]"
+ raise OutOfBoundsTimedelta(msg) from ex
+
+ if out_value == NPY_NAT:
+ return NaT
+ return _timedelta_from_value_and_reso(out_value, out_reso)
+
+
# Similar to Timestamp/datetime, this is a construction requirement for
# timedeltas that we need to do object instantiation in python. This will
# serve as a C extension type that shadows the Python class, where we do any
@@ -1375,7 +1409,7 @@ cdef class _Timedelta(timedelta):
@classmethod
def _from_value_and_reso(cls, int64_t value, NPY_DATETIMEUNIT reso):
# exposing as classmethod for testing
- return _timedelta_from_value_and_reso(value, reso)
+ return create_timedelta(value, "ignore", reso)
# Python front end to C extension type _Timedelta
@@ -1438,99 +1472,52 @@ class Timedelta(_Timedelta):
We see that either way we get the same result
"""
- _req_any_kwargs_new = {"weeks", "days", "hours", "minutes", "seconds",
- "milliseconds", "microseconds", "nanoseconds"}
+ _allowed_kwargs = (
+ "weeks", "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds"
+ )
def __new__(cls, object value=_no_input, unit=None, **kwargs):
- cdef _Timedelta td_base
+ cdef:
+ _Timedelta td_base
+ NPY_DATETIMEUNIT out_reso = NPY_FR_ns
+ # process kwargs iff no value passed
if value is _no_input:
- if not len(kwargs):
- raise ValueError("cannot construct a Timedelta without a "
- "value/unit or descriptive keywords "
- "(days,seconds....)")
-
- kwargs = {key: _to_py_int_float(kwargs[key]) for key in kwargs}
-
- unsupported_kwargs = set(kwargs)
- unsupported_kwargs.difference_update(cls._req_any_kwargs_new)
- if unsupported_kwargs or not cls._req_any_kwargs_new.intersection(kwargs):
+ if not kwargs:
+ raise ValueError(
+ "cannot construct a Timedelta without a value/unit "
+ "or descriptive keywords (days,seconds....)"
+ )
+ if not kwargs.keys() <= set(cls._allowed_kwargs):
raise ValueError(
"cannot construct a Timedelta from the passed arguments, "
- "allowed keywords are "
- "[weeks, days, hours, minutes, seconds, "
- "milliseconds, microseconds, nanoseconds]"
+ f"allowed keywords are {cls._allowed_kwargs}"
)
-
- # GH43764, convert any input to nanoseconds first and then
- # create the timestamp. This ensures that any potential
- # nanosecond contributions from kwargs parsed as floats
- # are taken into consideration.
- seconds = int((
+ # GH43764, convert any input to nanoseconds first, to ensure any potential
+ # nanosecond contributions from kwargs parsed as floats are included
+ kwargs = collections.defaultdict(int, {key: _to_py_int_float(val) for key, val in kwargs.items()})
+ ns = sum(
(
- (kwargs.get('days', 0) + kwargs.get('weeks', 0) * 7) * 24
- + kwargs.get('hours', 0)
- ) * 3600
- + kwargs.get('minutes', 0) * 60
- + kwargs.get('seconds', 0)
- ) * 1_000_000_000
- )
-
- value = np.timedelta64(
- int(kwargs.get('nanoseconds', 0))
- + int(kwargs.get('microseconds', 0) * 1_000)
- + int(kwargs.get('milliseconds', 0) * 1_000_000)
- + seconds
+ kwargs["weeks"] * 7 * 24 * 3600 * 1_000_000_000,
+ kwargs["days"] * 24 * 3600 * 1_000_000_000,
+ kwargs["hours"] * 3600 * 1_000_000_000,
+ kwargs["minutes"] * 60 * 1_000_000_000,
+ kwargs["seconds"] * 1_000_000_000,
+ kwargs["milliseconds"] * 1_000_000,
+ kwargs["microseconds"] * 1_000,
+ kwargs["nanoseconds"],
+ )
)
+ return create_timedelta(ns, "ns", out_reso)
- if unit in {'Y', 'y', 'M'}:
+ if isinstance(value, str) and unit is not None:
+ raise ValueError("unit must not be specified if the value is a str")
+ elif unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
-
- # GH 30543 if pd.Timedelta already passed, return it
- # check that only value is passed
- if isinstance(value, _Timedelta) and unit is None and len(kwargs) == 0:
- return value
- elif isinstance(value, _Timedelta):
- value = value.value
- elif isinstance(value, str):
- if unit is not None:
- raise ValueError("unit must not be specified if the value is a str")
- if (len(value) > 0 and value[0] == 'P') or (
- len(value) > 1 and value[:2] == '-P'
- ):
- value = parse_iso_format_string(value)
- else:
- value = parse_timedelta_string(value)
- value = np.timedelta64(value)
- elif PyDelta_Check(value):
- value = convert_to_timedelta64(value, 'ns')
- elif is_timedelta64_object(value):
- value = ensure_td64ns(value)
- elif is_tick_object(value):
- value = np.timedelta64(value.nanos, 'ns')
- elif is_integer_object(value) or is_float_object(value):
- # unit=None is de-facto 'ns'
- unit = parse_timedelta_unit(unit)
- value = convert_to_timedelta64(value, unit)
- elif checknull_with_nat(value):
- return NaT
- else:
- raise ValueError(
- "Value must be Timedelta, string, integer, "
- f"float, timedelta or convertible, not {type(value).__name__}"
- )
-
- if is_timedelta64_object(value):
- value = value.view('i8')
-
- # nat
- if value == NPY_NAT:
- return NaT
-
- return _timedelta_from_value_and_reso(value, NPY_FR_ns)
+ return create_timedelta(value, parse_timedelta_unit(unit), out_reso)
def __setstate__(self, state):
if len(state) == 1:
@@ -1607,30 +1594,25 @@ class Timedelta(_Timedelta):
# Arithmetic Methods
# TODO: Can some of these be defined in the cython class?
- __neg__ = _op_unary_method(lambda x: -x, '__neg__')
- __pos__ = _op_unary_method(lambda x: x, '__pos__')
- __abs__ = _op_unary_method(lambda x: abs(x), '__abs__')
+ __neg__ = _op_unary_method(operator.neg, "__neg__")
+ __pos__ = _op_unary_method(operator.pos, "__pos__")
+ __abs__ = _op_unary_method(operator.abs, "__abs__")
- __add__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__add__')
- __radd__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__radd__')
- __sub__ = _binary_op_method_timedeltalike(lambda x, y: x - y, '__sub__')
- __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, '__rsub__')
+ __add__ = _binary_op_method_timedeltalike(operator.add, "__add__")
+ __radd__ = _binary_op_method_timedeltalike(operator.add, "__radd__")
+ __sub__ = _binary_op_method_timedeltalike(operator.sub, "__sub__")
+ __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, "__rsub__")
def __mul__(self, other):
- if is_integer_object(other) or is_float_object(other):
- if util.is_nan(other):
- # np.nan * timedelta -> np.timedelta64("NaT"), in this case NaT
- return NaT
-
- return _timedelta_from_value_and_reso(
- <int64_t>(other * self.value),
- reso=self._reso,
- )
-
- elif is_array(other):
+ if util.is_nan(other):
+ # np.nan * timedelta -> np.timedelta64("NaT"), in this case NaT
+ return NaT
+ if is_array(other):
# ndarray-like
return other * self.to_timedelta64()
-
+ if is_integer_object(other) or is_float_object(other):
+ # can't call Timedelta b/c it doesn't (yet) expose reso
+ return create_timedelta(self.value * other, "ignore", self._reso)
return NotImplemented
__rmul__ = __mul__
@@ -1825,6 +1807,6 @@ cdef _broadcast_floordiv_td64(
# resolution in ns
-Timedelta.min = Timedelta(np.iinfo(np.int64).min + 1)
-Timedelta.max = Timedelta(np.iinfo(np.int64).max)
+Timedelta.min = Timedelta(NPY_NAT + 1)
+Timedelta.max = Timedelta(INT64_MAX)
Timedelta.resolution = Timedelta(nanoseconds=1)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index abdb4aebb625f..57a20290a9982 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -90,7 +90,10 @@ from pandas._libs.tslibs.np_datetime cimport (
pydatetime_to_dt64,
)
-from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
+from pandas._libs.tslibs.np_datetime import (
+ OutOfBoundsDatetime,
+ OutOfBoundsTimedelta,
+)
from pandas._libs.tslibs.offsets cimport (
BaseOffset,
@@ -391,6 +394,9 @@ cdef class _Timestamp(ABCTimestamp):
return NotImplemented
def __sub__(self, other):
+ # nb: counterintuitive semantics of __sub__, __rsub__ for cython < 3.x
+ # github.com/cython/cython/blob/2795a4/docs/src/userguide/special_methods.rst#arithmetic-methods
+ # GH#28286
if isinstance(self, _Timestamp) and self._reso != NPY_FR_ns:
raise NotImplementedError(self._reso)
@@ -435,20 +441,17 @@ cdef class _Timestamp(ABCTimestamp):
# Timedelta
try:
return Timedelta(self.value - other.value)
- except (OverflowError, OutOfBoundsDatetime) as err:
- if isinstance(other, _Timestamp):
- if both_timestamps:
- raise OutOfBoundsDatetime(
- "Result is too large for pandas.Timedelta. Convert inputs "
- "to datetime.datetime with 'Timestamp.to_pydatetime()' "
- "before subtracting."
- ) from err
+ except OutOfBoundsTimedelta as err:
+ if both_timestamps:
+ raise OutOfBoundsTimedelta(
+ "Result is too large for pandas.Timedelta. Convert inputs "
+ "to datetime.datetime with 'Timestamp.to_pydatetime()' "
+ "before subtracting."
+ ) from err
# We get here in stata tests, fall back to stdlib datetime
# method and return stdlib timedelta object
pass
elif is_datetime64_object(self):
- # GH#28286 cython semantics for __rsub__, `other` is actually
- # the Timestamp
# TODO(cython3): remove this, this moved to __rsub__
return type(other)(self) - other
@@ -461,7 +464,7 @@ cdef class _Timestamp(ABCTimestamp):
if PyDateTime_Check(other):
try:
return type(self)(other) - self
- except (OverflowError, OutOfBoundsDatetime) as err:
+ except (OverflowError, OutOfBoundsDatetime, OutOfBoundsTimedelta) as err:
# We get here in stata tests, fall back to stdlib datetime
# method and return stdlib timedelta object
pass
diff --git a/pandas/tests/scalar/timedelta/conftest.py b/pandas/tests/scalar/timedelta/conftest.py
new file mode 100644
index 0000000000000..a2398c06d5e75
--- /dev/null
+++ b/pandas/tests/scalar/timedelta/conftest.py
@@ -0,0 +1,10 @@
+import re
+
+import pytest
+
+
+@pytest.fixture(name="td_overflow_msg")
+def fixture_td_overflow_msg() -> str:
+ return re.escape(
+ "outside allowed range [-9223372036854775807ns, 9223372036854775807ns]"
+ )
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 74aa7f045088e..3d0dc54636216 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -1,34 +1,159 @@
"""
-Tests for scalar Timedelta arithmetic ops
+Tests of binary ops between a Timedelta scalar and another scalar or a
+Array/Index/Series/DataFrame.
+
+See test_timedelta.py, in this same directory, for tests against the rest of the public
+Timedelta API.
"""
+
+from __future__ import annotations
+
from datetime import (
datetime,
timedelta,
)
import operator
+import re
import numpy as np
import pytest
-from pandas.errors import OutOfBoundsTimedelta
+from pandas._libs.tslibs import (
+ NaTType,
+ OutOfBoundsTimedelta,
+)
import pandas as pd
from pandas import (
+ NA,
NaT,
Timedelta,
Timestamp,
+ compat,
offsets,
)
import pandas._testing as tm
from pandas.core import ops
-class TestTimedeltaAdditionSubtraction:
+@pytest.fixture(name="tdlike_cls", params=(Timedelta, timedelta, np.timedelta64))
+def fixture_tdlike_cls(request) -> type:
+ return request.param
+
+
+# Tick, too?
+@pytest.fixture(
+ name="tdlike_or_offset_cls",
+ params=(Timedelta, timedelta, np.timedelta64, offsets.Nano),
+)
+def fixture_tdlike_or_offset_cls(request) -> type:
+ return request.param
+
+
+@pytest.fixture(name="ten_days")
+def fixture_ten_days() -> Timedelta:
+ return Timedelta(days=10)
+
+
+@pytest.fixture(name="y2k", params=(Timestamp, np.datetime64, datetime.fromisoformat))
+def fixture_y2k(request):
+ return request.param("2000-01-01")
+
+
+@pytest.fixture(name="one_day")
+def fixture_one_day(tdlike_cls: type):
+ if tdlike_cls is np.timedelta64:
+ return np.timedelta64(1, "D")
+ return tdlike_cls(days=1)
+
+
+@pytest.fixture(
+ name="na_value",
+ params=(None, np.nan, np.float64("NaN"), NaT, NA),
+ ids=("None", "np.nan", "np.float64('NaN')", "NaT", "NA"),
+)
+def fixture_na_value(request):
+ return request.param
+
+
+@pytest.fixture(name="add_op", params=(operator.add, ops.radd))
+def fixture_add_op(request):
+ return request.param
+
+
+@pytest.fixture(name="sub_op", params=(operator.sub, ops.rsub))
+def fixture_sub_op(request):
+ return request.param
+
+
+@pytest.fixture(
+ name="add_or_sub",
+ params=(operator.add, ops.radd, operator.sub, ops.rsub),
+)
+def fixture_add_or_sub(request):
+ return request.param
+
+
+@pytest.fixture(name="mul_op", params=(operator.mul, ops.rmul))
+def fixture_mul_op(request):
+ return request.param
+
+
+@pytest.fixture(name="truediv_op", params=(operator.truediv, ops.rtruediv))
+def fixture_truediv_op(request):
+ return request.param
+
+
+@pytest.fixture(
+ name="floor_mod_divmod_op",
+ params=(
+ operator.floordiv,
+ ops.rfloordiv,
+ operator.mod,
+ ops.rmod,
+ divmod,
+ ops.rdivmod,
+ ),
+)
+def fixture_floor_mod_divmod_op(request):
+ return request.param
+
+
+@pytest.fixture(name="invalid_op_msg")
+def fixture_invalid_op_msg() -> str:
+ messages = (
+ "cannot use operands with types",
+ "Concatenation operation is not implemented for NumPy arrays",
+ "cannot perform",
+ "not supported between instances of 'Timedelta' and ",
+ re.escape("unsupported operand type(s)"),
+ )
+ return "|".join(messages)
+
+
+xfail_type_error = pytest.mark.xfail(
+ reason="unsupported",
+ raises=TypeError,
+ strict=True,
+)
+
+
+def test_binary_ops_not_implemented_for_arbitrary_types(
+ ten_days: Timedelta,
+ invalid_op_msg: str,
+ all_binary_operators,
+):
+ if all_binary_operators not in (operator.eq, operator.ne):
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ all_binary_operators(ten_days, object())
+
+
+class TestAdditionSubtractionScalar:
"""
- Tests for Timedelta methods:
+ Tests against the following Timedelta methods, where second operand is a scalar:
- __add__, __radd__,
- __sub__, __rsub__
+ __add__,__radd__,
+ __sub__,__rsub__
"""
@pytest.mark.parametrize(
@@ -41,7 +166,7 @@ class TestTimedeltaAdditionSubtraction:
offsets.Second(10),
],
)
- def test_td_add_sub_ten_seconds(self, ten_seconds):
+ def test_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
@@ -63,7 +188,7 @@ def test_td_add_sub_ten_seconds(self, ten_seconds):
offsets.Day() + offsets.Second(10),
],
)
- def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
+ def test_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
@@ -75,182 +200,129 @@ def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
result = base - one_day_ten_secs
assert result == expected_sub
- @pytest.mark.parametrize("op", [operator.add, ops.radd])
- def test_td_add_datetimelike_scalar(self, op):
+ @pytest.mark.parametrize("value", (2, 2.0), ids=("int", "float"))
+ def test_add_or_sub_numeric_raises(
+ self,
+ ten_days: Timedelta,
+ add_or_sub,
+ value,
+ invalid_op_msg: str,
+ ):
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ add_or_sub(ten_days, value)
+
+ def test_add_datetimelike(self, ten_days: Timedelta, add_op, y2k):
# GH#19738
- td = Timedelta(10, unit="d")
+ result = add_op(y2k, ten_days)
+ expected = Timestamp("2000-01-11")
- result = op(td, datetime(2016, 1, 1))
- if op is operator.add:
+ if type(y2k) != datetime and add_op != ops.radd:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
- assert result == Timestamp(2016, 1, 11)
-
- result = op(td, Timestamp("2018-01-12 18:09"))
- assert isinstance(result, Timestamp)
- assert result == Timestamp("2018-01-22 18:09")
-
- result = op(td, np.datetime64("2018-01-12"))
- assert isinstance(result, Timestamp)
- assert result == Timestamp("2018-01-22")
-
- result = op(td, NaT)
- assert result is NaT
-
- def test_td_add_timestamp_overflow(self):
- msg = "int too (large|big) to convert"
- with pytest.raises(OverflowError, match=msg):
- Timestamp("1700-01-01") + Timedelta(13 * 19999, unit="D")
-
- with pytest.raises(OutOfBoundsTimedelta, match=msg):
- Timestamp("1700-01-01") + timedelta(days=13 * 19999)
-
- @pytest.mark.parametrize("op", [operator.add, ops.radd])
- def test_td_add_td(self, op):
- td = Timedelta(10, unit="d")
-
- result = op(td, Timedelta(days=10))
- assert isinstance(result, Timedelta)
- assert result == Timedelta(days=20)
-
- @pytest.mark.parametrize("op", [operator.add, ops.radd])
- def test_td_add_pytimedelta(self, op):
- td = Timedelta(10, unit="d")
- result = op(td, timedelta(days=9))
- assert isinstance(result, Timedelta)
- assert result == Timedelta(days=19)
-
- @pytest.mark.parametrize("op", [operator.add, ops.radd])
- def test_td_add_timedelta64(self, op):
- td = Timedelta(10, unit="d")
- result = op(td, np.timedelta64(-4, "D"))
- assert isinstance(result, Timedelta)
- assert result == Timedelta(days=6)
+ assert result == expected
- @pytest.mark.parametrize("op", [operator.add, ops.radd])
- def test_td_add_offset(self, op):
- td = Timedelta(10, unit="d")
+ def test_sub_datetimelike(self, ten_days: Timedelta, y2k, invalid_op_msg: str):
+ assert y2k - ten_days == Timestamp("1999-12-22")
- result = op(td, offsets.Hour(6))
- assert isinstance(result, Timedelta)
- assert result == Timedelta(days=10, hours=6)
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ ten_days - y2k
- def test_td_sub_td(self):
- td = Timedelta(10, unit="d")
- expected = Timedelta(0, unit="ns")
- result = td - td
+ def test_add_timedeltalike(self, ten_days: Timedelta, add_op, one_day):
+ result = add_op(ten_days, one_day)
+ expected = Timedelta(days=11)
assert isinstance(result, Timedelta)
assert result == expected
- def test_td_sub_pytimedelta(self):
- td = Timedelta(10, unit="d")
- expected = Timedelta(0, unit="ns")
-
- result = td - td.to_pytimedelta()
+ def test_sub_timedeltalike(self, ten_days: Timedelta, sub_op, one_day):
+ result = sub_op(ten_days, one_day)
+ expected = Timedelta(days=9) if sub_op is operator.sub else Timedelta(days=-9)
assert isinstance(result, Timedelta)
assert result == expected
- result = td.to_pytimedelta() - td
+ def test_add_offset(self, ten_days: Timedelta, add_op):
+ result = add_op(ten_days, offsets.Hour(6))
+ expected = Timedelta(days=10, hours=6)
assert isinstance(result, Timedelta)
assert result == expected
- def test_td_sub_timedelta64(self):
- td = Timedelta(10, unit="d")
- expected = Timedelta(0, unit="ns")
-
- result = td - td.to_timedelta64()
- assert isinstance(result, Timedelta)
- assert result == expected
+ def test_sub_offset(self, ten_days: Timedelta, sub_op):
+ result = sub_op(ten_days, offsets.Hour(1))
+ if sub_op is operator.sub:
+ expected = Timedelta(hours=239)
+ else:
+ expected = Timedelta(hours=-239)
- result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
- def test_td_sub_nat(self):
- # In this context pd.NaT is treated as timedelta-like
- td = Timedelta(10, unit="d")
- result = td - NaT
- assert result is NaT
-
- def test_td_sub_td64_nat(self):
- td = Timedelta(10, unit="d")
- td_nat = np.timedelta64("NaT")
-
- result = td - td_nat
- assert result is NaT
-
- result = td_nat - td
+ def test_add_sub_tdlike_raises_for_any_result_above_td_max(
+ self,
+ tdlike_or_offset_cls,
+ td_overflow_msg: str,
+ ):
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta.max + tdlike_or_offset_cls(1)
+
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta.max - (tdlike_or_offset_cls(-1))
+
+ def test_add_sub_tdlike_raises_no_error_for_result_1ns_below_td_min(self):
+ assert Timedelta.min + Timedelta(-1, "ns") is NaT
+ assert offsets.Nano(-1) + Timedelta.min is NaT
+ assert Timedelta.min - np.timedelta64(1, "ns") is NaT
+
+ def test_add_sub_tdlike_raises_for_any_result_2ns_below_td_min(
+ self,
+ tdlike_or_offset_cls: type,
+ td_overflow_msg: str,
+ ):
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta.min + tdlike_or_offset_cls(-2)
+
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta.min - tdlike_or_offset_cls(2)
+
+ def test_add_or_sub_na(self, request, ten_days: Timedelta, add_or_sub, na_value):
+ if na_value is NA:
+ request.applymarker(xfail_type_error)
+ result = add_or_sub(ten_days, na_value)
assert result is NaT
- def test_td_sub_offset(self):
- td = Timedelta(10, unit="d")
- result = td - offsets.Hour(1)
- assert isinstance(result, Timedelta)
- assert result == Timedelta(239, unit="h")
-
- def test_td_add_sub_numeric_raises(self):
- td = Timedelta(10, unit="d")
- msg = "unsupported operand type"
- for other in [2, 2.0, np.int64(2), np.float64(2)]:
- with pytest.raises(TypeError, match=msg):
- td + other
- with pytest.raises(TypeError, match=msg):
- other + td
- with pytest.raises(TypeError, match=msg):
- td - other
- with pytest.raises(TypeError, match=msg):
- other - td
-
- def test_td_add_sub_int_ndarray(self):
- td = Timedelta("1 day")
- other = np.array([1])
-
- msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
- with pytest.raises(TypeError, match=msg):
- td + np.array([1])
-
- msg = "|".join(
- [
- (
- r"unsupported operand type\(s\) for \+: 'numpy.ndarray' "
- "and 'Timedelta'"
- ),
- # This message goes on to say "Please do not rely on this error;
- # it may not be given on all Python implementations"
- "Concatenation operation is not implemented for NumPy arrays",
- ]
- )
- with pytest.raises(TypeError, match=msg):
- other + td
- msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
- with pytest.raises(TypeError, match=msg):
- td - other
- msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
- with pytest.raises(TypeError, match=msg):
- other - td
- def test_td_rsub_nat(self):
- td = Timedelta(10, unit="d")
- result = NaT - td
- assert result is NaT
+class TestAdditionSubtractionBox:
+ """
+ Tests against the following Timedelta methods, where second operand is an
+ Array/Index/Series/DataFrame:
- result = np.datetime64("NaT") - td
- assert result is NaT
+ __add__,__radd__,
+ __sub__,__rsub__
+ """
- def test_td_rsub_offset(self):
- result = offsets.Hour(1) - Timedelta(10, unit="d")
- assert isinstance(result, Timedelta)
- assert result == Timedelta(-239, unit="h")
+ @pytest.mark.parametrize("value", (2, 2.0), ids=("int", "float"))
+ def test_add_or_sub_numeric_raises(
+ self,
+ ten_days: Timedelta,
+ add_or_sub,
+ box_with_array,
+ value,
+ invalid_op_msg: str,
+ ):
+ other = tm.box_expected([value], box_with_array)
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ add_or_sub(ten_days, other)
+
+ def test_add_datetimelike(self):
+ pass
- def test_td_sub_timedeltalike_object_dtype_array(self):
+ def test_sub_from_datetimelike(self, ten_days: Timedelta, box_with_array):
# GH#21980
- arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
- exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
- res = arr - Timedelta("1D")
- tm.assert_numpy_array_equal(res, exp)
+ other = tm.box_expected([np.datetime64("2000-01-11")], box_with_array)
+ expected = tm.box_expected([np.datetime64("2000-01-01")], box_with_array)
+ result = other - ten_days
+ tm.assert_equal(result, expected)
- def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
+ def test_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp("2021-11-09 09:54:00")
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
@@ -264,48 +336,39 @@ def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
- def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
+ def test_rsub_mixed_most_timedeltalike_object_dtype_array(self, invalid_op_msg):
# GH#21980
now = Timestamp("2021-11-09 09:54:00")
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
- msg = r"unsupported operand type\(s\) for \-: 'Timedelta' and 'Timestamp'"
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(TypeError, match=invalid_op_msg):
Timedelta("1D") - arr
- @pytest.mark.parametrize("op", [operator.add, ops.radd])
- def test_td_add_timedeltalike_object_dtype_array(self, op):
+ def test_add_timedeltalike_object_dtype_array(self, add_op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
- res = op(arr, Timedelta("1D"))
+ res = add_op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
- @pytest.mark.parametrize("op", [operator.add, ops.radd])
- def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
+ def test_add_mixed_timedeltalike_object_dtype_array(self, add_op):
# GH#21980
now = Timestamp("2021-11-09 09:54:00")
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
- res = op(arr, Timedelta("1D"))
+ res = add_op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
- def test_td_add_sub_td64_ndarray(self):
- td = Timedelta("1 day")
-
- other = np.array([td.to_timedelta64()])
- expected = np.array([Timedelta("2 Days").to_timedelta64()])
-
- result = td + other
- tm.assert_numpy_array_equal(result, expected)
- result = other + td
+ def test_add_td64_ndarray(self, ten_days: Timedelta, add_op):
+ result = add_op(ten_days, np.array([np.timedelta64(1, "D")]))
+ expected = np.array([Timedelta(days=11).to_timedelta64()])
tm.assert_numpy_array_equal(result, expected)
- result = td - other
- tm.assert_numpy_array_equal(result, expected * 0)
- result = other - td
- tm.assert_numpy_array_equal(result, expected * 0)
+ def test_sub_td64_ndarray(self, ten_days: Timedelta, sub_op):
+ result = sub_op(ten_days, np.array([np.timedelta64(10, "D")]))
+ expected = np.array([0], dtype="timedelta64[ns]")
+ tm.assert_numpy_array_equal(result, expected)
- def test_td_add_sub_dt64_ndarray(self):
+ def test_add_sub_dt64_ndarray(self):
td = Timedelta("1 day")
other = pd.to_datetime(["2000-01-01"]).values
@@ -317,279 +380,378 @@ def test_td_add_sub_dt64_ndarray(self):
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
+ def test_na(self):
+ pass
-class TestTimedeltaMultiplicationDivision:
- """
- Tests for Timedelta methods:
-
- __mul__, __rmul__,
- __div__, __rdiv__,
- __truediv__, __rtruediv__,
- __floordiv__, __rfloordiv__,
- __mod__, __rmod__,
- __divmod__, __rdivmod__
+
+class TestMultiplicationScalar:
"""
+ Tests against the following Timedelta methods, where second operand is a scalar:
- # ---------------------------------------------------------------
- # Timedelta.__mul__, __rmul__
+ __mul__,__rmul__
+ """
@pytest.mark.parametrize(
- "td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
+ "factor,expected",
+ ((2, 20), (1.5, 15), (-1, -10), (-1.0, -10)),
)
- @pytest.mark.parametrize("op", [operator.mul, ops.rmul])
- def test_td_mul_nat(self, op, td_nat):
- # GH#19819
- td = Timedelta(10, unit="d")
- typs = "|".join(["numpy.timedelta64", "NaTType", "Timedelta"])
- msg = "|".join(
- [
- rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'",
- r"ufunc '?multiply'? cannot use operands with types",
- ]
- )
- with pytest.raises(TypeError, match=msg):
- op(td, td_nat)
-
- @pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
- @pytest.mark.parametrize("op", [operator.mul, ops.rmul])
- def test_td_mul_nan(self, op, nan):
- # np.float64('NaN') has a 'dtype' attr, avoid treating as array
- td = Timedelta(10, unit="d")
- result = op(td, nan)
- assert result is NaT
-
- @pytest.mark.parametrize("op", [operator.mul, ops.rmul])
- def test_td_mul_scalar(self, op):
+ def test_numeric(self, ten_days: Timedelta, mul_op, factor, expected):
# GH#19738
- td = Timedelta(minutes=3)
-
- result = op(td, 2)
- assert result == Timedelta(minutes=6)
-
- result = op(td, 1.5)
- assert result == Timedelta(minutes=4, seconds=30)
-
- assert op(td, np.nan) is NaT
-
- assert op(-1, td).value == -1 * td.value
- assert op(-1.0, td).value == -1.0 * td.value
-
- msg = "unsupported operand type"
- with pytest.raises(TypeError, match=msg):
- # timedelta * datetime is gibberish
- op(td, Timestamp(2016, 1, 2))
-
- with pytest.raises(TypeError, match=msg):
- # invalid multiply with another timedelta
- op(td, td)
+ result = mul_op(ten_days, factor)
+ assert result == Timedelta(expected, "D")
+ assert isinstance(result, Timedelta)
- def test_td_mul_numeric_ndarray(self):
- td = Timedelta("1 day")
- other = np.array([2])
- expected = np.array([Timedelta("2 Days").to_timedelta64()])
+ @pytest.mark.parametrize("value", (Timedelta.min, Timedelta.max, offsets.Day(1)))
+ def test_raises_for_datetimelike_timedeltalike_offset(
+ self,
+ ten_days: Timedelta,
+ mul_op,
+ value,
+ invalid_op_msg: str,
+ ):
+ # timedelta * datetime is gibberish, as is multiplying by another timedelta
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ mul_op(ten_days, value)
+
+ @pytest.mark.parametrize("value", (Timedelta.min, Timedelta.max))
+ @pytest.mark.parametrize("factor", (1.01, 2), ids=("float", "int"))
+ def test_raises_for_overflow(self, value, mul_op, factor, td_overflow_msg: str):
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ mul_op(value, factor)
+
+ def test_na(self, request, ten_days: Timedelta, mul_op, na_value):
+ if na_value is None or na_value is NaT or na_value is NA:
+ request.applymarker(xfail_type_error)
+ result = mul_op(ten_days, na_value)
+ assert result is NaT
- result = td * other
- tm.assert_numpy_array_equal(result, expected)
- result = other * td
- tm.assert_numpy_array_equal(result, expected)
+class TestMultiplicationBox:
+ """
+ Tests against the following Timedelta methods, where second operand is an
+ Array/Index/Series/DataFrame:
- def test_td_mul_td64_ndarray_invalid(self):
- td = Timedelta("1 day")
- other = np.array([Timedelta("2 Days").to_timedelta64()])
+ __mul__,__rmul__
+ """
- msg = (
- "ufunc '?multiply'? cannot use operands with types "
- r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
+ @pytest.mark.parametrize("factor,expected", ((2, 20), (1.5, 15)))
+ def test_numeric(self, ten_days, mul_op, factor, expected, box_with_array):
+ other = tm.box_expected([factor], box_with_array)
+ result = mul_op(ten_days, other)
+ expected = tm.box_expected(
+ [Timedelta(expected, "D").to_timedelta64()],
+ box_with_array,
)
- with pytest.raises(TypeError, match=msg):
- td * other
- with pytest.raises(TypeError, match=msg):
- other * td
+ tm.assert_equal(result, expected)
+
+ @pytest.mark.skipif(not compat.IS64, reason="flaky")
+ @pytest.mark.xfail(compat.IS64, reason="no overflow check", raises=AssertionError)
+ @pytest.mark.parametrize("factors", ((1, 2), (1, 1.5)), ids=("ints", "floats"))
+ def test_returns_nat_if_result_overflows(self, mul_op, factors, box_with_array):
+ numeric_box = tm.box_expected(factors, box_with_array)
+ result = mul_op(Timedelta.max, numeric_box)
+ expected = tm.box_expected((Timedelta.max, NaT), box_with_array)
+
+ tm.assert_equal(result, expected)
+
+ @pytest.mark.parametrize("value", (Timedelta.min, Timedelta.max, offsets.Day(1)))
+ def test_raises_for_datetimelike_timedeltalike_offset(
+ self,
+ ten_days: Timedelta,
+ mul_op,
+ value,
+ box_with_array,
+ invalid_op_msg: str,
+ ):
+ other = tm.box_expected([value], box_with_array)
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ mul_op(ten_days, other)
+
+ def test_na(self):
+ ...
+
+
+class TestDivisionScalar:
+ """
+ Tests against the following Timedelta methods, where second operand is a scalar:
- # ---------------------------------------------------------------
- # Timedelta.__div__, __truediv__
+ __truediv__,__rtruediv__
+ __floordiv__,__rfloordiv__
+ __mod__,__rmod__
+ __divmod__,__rdivmod__
+ """
- def test_td_div_timedeltalike_scalar(self):
+ @pytest.mark.parametrize(
+ ("div_op", "divisor", "expected"),
+ (
+ (operator.truediv, 2.0, Timedelta(days=5)),
+ (operator.floordiv, 3, Timedelta(days=3, hours=8)),
+ (operator.mod, 11, Timedelta(nanoseconds=6)),
+ (divmod, 3, (Timedelta(days=3, hours=8), Timedelta(0))),
+ ),
+ ids=("truediv", "floordiv", "mod", "divmod"),
+ )
+ def test_div_numeric(
+ self,
+ ten_days: Timedelta,
+ div_op,
+ divisor,
+ any_real_numpy_dtype,
+ expected,
+ ):
# GH#19738
- td = Timedelta(10, unit="d")
-
- result = td / offsets.Hour(1)
- assert result == 240
-
- assert td / td == 1
- assert td / np.timedelta64(60, "h") == 4
+ scalar = np.dtype(any_real_numpy_dtype).type(divisor)
+ result = div_op(ten_days, scalar)
- assert np.isnan(td / NaT)
-
- def test_td_div_td64_non_nano(self):
+ assert result == expected
+ if div_op is divmod:
+ assert all(isinstance(r, Timedelta) for r in result)
+ else:
+ assert isinstance(result, Timedelta)
- # truediv
- td = Timedelta("1 days 2 hours 3 ns")
- result = td / np.timedelta64(1, "D")
- assert result == td.value / (86400 * 10**9)
- result = td / np.timedelta64(1, "s")
- assert result == td.value / 10**9
- result = td / np.timedelta64(1, "ns")
- assert result == td.value
+ @pytest.mark.parametrize(
+ "rdiv_op",
+ (ops.rtruediv, ops.rfloordiv, ops.rmod, ops.rdivmod),
+ )
+ def test_rdiv_numeric_raises(
+ self,
+ ten_days: Timedelta,
+ rdiv_op,
+ any_real_numpy_dtype,
+ invalid_op_msg: str,
+ ):
+ scalar = np.dtype(any_real_numpy_dtype).type(1)
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ rdiv_op(ten_days, scalar)
- # floordiv
- td = Timedelta("1 days 2 hours 3 ns")
- result = td // np.timedelta64(1, "D")
- assert result == 1
- result = td // np.timedelta64(1, "s")
- assert result == 93600
- result = td // np.timedelta64(1, "ns")
- assert result == td.value
+ @pytest.mark.parametrize(
+ "any_div_op",
+ (
+ operator.truediv,
+ ops.rtruediv,
+ operator.floordiv,
+ ops.rfloordiv,
+ operator.mod,
+ ops.rmod,
+ divmod,
+ ops.rdivmod,
+ ),
+ )
+ def test_datetimelike_raises(
+ self,
+ ten_days: Timedelta,
+ any_div_op,
+ y2k,
+ invalid_op_msg: str,
+ ):
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ any_div_op(ten_days, y2k)
- def test_td_div_numeric_scalar(self):
+ @pytest.mark.parametrize(
+ ("div_op", "expected"),
+ (
+ (operator.truediv, 10),
+ (operator.floordiv, 10),
+ (operator.mod, Timedelta(0)),
+ (divmod, (10, Timedelta(0))),
+ (ops.rtruediv, 0.1),
+ (ops.rfloordiv, 0),
+ (ops.rmod, Timedelta(days=1)),
+ (ops.rdivmod, (0, Timedelta(days=1))),
+ ),
+ )
+ def test_timedeltalike(self, ten_days: Timedelta, div_op, one_day, expected):
# GH#19738
- td = Timedelta(10, unit="d")
-
- result = td / 2
- assert isinstance(result, Timedelta)
- assert result == Timedelta(days=5)
-
- result = td / 5
- assert isinstance(result, Timedelta)
- assert result == Timedelta(days=2)
+ result = div_op(ten_days, one_day)
+ assert result == expected
@pytest.mark.parametrize(
- "nan",
- [
- np.nan,
- np.float64("NaN"),
- float("nan"),
- ],
+ ("div_op", "expected"),
+ (
+ (operator.truediv, 10),
+ (operator.floordiv, 10),
+ (operator.mod, Timedelta(0)),
+ (divmod, (10, Timedelta(0))),
+ (ops.rtruediv, 0.1),
+ (ops.rfloordiv, 0),
+ (ops.rmod, Timedelta(days=1)),
+ (ops.rdivmod, (0, Timedelta(days=1))),
+ ),
)
- def test_td_div_nan(self, nan):
- # np.float64('NaN') has a 'dtype' attr, avoid treating as array
- td = Timedelta(10, unit="d")
- result = td / nan
- assert result is NaT
+ def test_offset(self, ten_days: Timedelta, div_op, expected):
+ result = div_op(ten_days, offsets.Day(1))
+ assert result == expected
- result = td // nan
- assert result is NaT
+ def test_na(self, request, ten_days: Timedelta, truediv_op, na_value):
+ expected: NaTType | float = NaT
- def test_td_div_td64_ndarray(self):
- td = Timedelta("1 day")
+ if na_value is None or na_value is NaT:
+ expected = np.nan
+ elif na_value is NA or (
+ truediv_op is ops.rtruediv and isinstance(na_value, float)
+ ):
+ request.applymarker(xfail_type_error)
- other = np.array([Timedelta("2 Days").to_timedelta64()])
- expected = np.array([0.5])
+ result = truediv_op(ten_days, na_value)
+ assert result is expected
- result = td / other
- tm.assert_numpy_array_equal(result, expected)
+ def test_floordiv_na(self, request, ten_days: Timedelta, na_value):
+ expected: NaTType | float = NaT
- result = other / td
- tm.assert_numpy_array_equal(result, expected * 4)
+ if na_value is None or na_value is NaT:
+ expected = np.nan
+ elif na_value is NA:
+ request.applymarker(xfail_type_error)
- # ---------------------------------------------------------------
- # Timedelta.__rdiv__
+ result = ten_days // na_value
+ assert result is expected
- def test_td_rdiv_timedeltalike_scalar(self):
- # GH#19738
- td = Timedelta(10, unit="d")
- result = offsets.Hour(1) / td
- assert result == 1 / 240.0
+ def test_rfloordiv_na(self, request, ten_days: Timedelta, na_value):
+ expected = np.nan
+ if na_value is NA or isinstance(na_value, float):
+ request.applymarker(xfail_type_error)
- assert np.timedelta64(60, "h") / td == 0.25
+ result = na_value // ten_days
+ assert result is expected
- def test_td_rdiv_na_scalar(self):
- # GH#31869 None gets cast to NaT
- td = Timedelta(10, unit="d")
+ def test_mod_na(self, request, ten_days: Timedelta, na_value):
+ expected = NaT
+ if na_value is None or na_value is NA:
+ request.applymarker(xfail_type_error)
- result = NaT / td
- assert np.isnan(result)
+ result = ten_days % na_value
+ assert result is expected
- result = None / td
- assert np.isnan(result)
+ def test_rmod_na(self, request, ten_days: Timedelta, na_value):
+ if na_value is not NaT:
+ request.applymarker(xfail_type_error)
- result = np.timedelta64("NaT") / td
- assert np.isnan(result)
+ result = na_value % ten_days
+ assert result is NaT
- msg = r"unsupported operand type\(s\) for /: 'numpy.datetime64' and 'Timedelta'"
- with pytest.raises(TypeError, match=msg):
- np.datetime64("NaT") / td
+ def test_divmod_na(self, request, ten_days: Timedelta, na_value):
+ expected: tuple[NaTType | float, NaTType] = (NaT, NaT)
- msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
- with pytest.raises(TypeError, match=msg):
- np.nan / td
+ if na_value is None or na_value is NA:
+ request.applymarker(xfail_type_error)
+ elif na_value is NaT:
+ expected = (np.nan, NaT)
- def test_td_rdiv_ndarray(self):
- td = Timedelta(10, unit="d")
+ result = divmod(ten_days, na_value)
+ assert result == expected
- arr = np.array([td], dtype=object)
- result = arr / td
- expected = np.array([1], dtype=np.float64)
- tm.assert_numpy_array_equal(result, expected)
+ def test_rdivmod_na(self, request, ten_days: Timedelta, na_value):
+ expected = (np.nan, NaT)
+ if na_value is not NaT:
+ request.applymarker(xfail_type_error)
- arr = np.array([None])
- result = arr / td
- expected = np.array([np.nan])
- tm.assert_numpy_array_equal(result, expected)
+ result = ops.rdivmod(ten_days, na_value)
+ assert result == expected
- arr = np.array([np.nan], dtype=object)
- msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
- with pytest.raises(TypeError, match=msg):
- arr / td
- arr = np.array([np.nan], dtype=np.float64)
- msg = "cannot use operands with types dtype"
- with pytest.raises(TypeError, match=msg):
- arr / td
+class TestDivisionBox:
+ """
+ Tests against the following Timedelta methods, where second operand is an
+ Array/Index/Series/DataFrame:
- # ---------------------------------------------------------------
- # Timedelta.__floordiv__
+ __truediv__,__rtruediv__
+ __floordiv__,__rfloordiv__
+ __mod__,__rmod__
+ __divmod__,__rdivmod__
+ """
- def test_td_floordiv_timedeltalike_scalar(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=4)
- scalar = Timedelta(hours=3, minutes=3)
+ def test_truediv_numeric(self, ten_days: Timedelta, any_real_numpy_dtype):
+ # GH#19738
+ scalar = np.dtype(any_real_numpy_dtype).type(2.0)
+ result = ten_days / scalar
+ assert isinstance(result, Timedelta)
+ assert result == Timedelta(days=5)
- assert td // scalar == 1
- assert -td // scalar.to_pytimedelta() == -2
- assert (2 * td) // scalar.to_timedelta64() == 2
+ def test_rtruediv_numeric_raises(
+ self,
+ ten_days: Timedelta,
+ invalid_op_msg: str,
+ any_real_numpy_dtype,
+ ):
+ scalar = np.dtype(any_real_numpy_dtype).type(2.0)
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ scalar / ten_days
- def test_td_floordiv_null_scalar(self):
+ @pytest.mark.parametrize(
+ "any_div_op",
+ (
+ operator.truediv,
+ ops.rtruediv,
+ operator.floordiv,
+ ops.rfloordiv,
+ operator.mod,
+ ops.rmod,
+ divmod,
+ ops.rdivmod,
+ ),
+ )
+ def test_datetimelike_raises(
+ self,
+ ten_days: Timedelta,
+ any_div_op,
+ y2k,
+ box_with_array,
+ invalid_op_msg: str,
+ ):
+ other = tm.box_expected((y2k,), box_with_array)
+ with pytest.raises(TypeError, match=invalid_op_msg):
+ any_div_op(ten_days, other)
+
+ def test_timedeltalike(
+ self,
+ ten_days: Timedelta,
+ truediv_op,
+ tdlike_cls,
+ box_with_array,
+ ):
+ # TODO:
+ elem = tdlike_cls(days=10) if tdlike_cls is timedelta else tdlike_cls(10, "D")
+ other = tm.box_expected((elem,), box_with_array)
+
+ if box_with_array is pd.array:
+ expected = np.array((1.0,))
+ else:
+ expected = tm.box_expected((1.0,), box_with_array)
+
+ result = truediv_op(ten_days, other)
+ tm.assert_equal(result, expected)
+
+ def test_offset(self, ten_days: Timedelta):
+ ...
+
+ def test_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
+ ser = pd.Series([1], dtype=np.int64)
+ res = td // ser
+ assert res.dtype.kind == "m"
- assert td // np.nan is NaT
- assert np.isnan(td // NaT)
- assert np.isnan(td // np.timedelta64("NaT"))
-
- def test_td_floordiv_offsets(self):
- # GH#19738
- td = Timedelta(hours=3, minutes=4)
- assert td // offsets.Hour(1) == 3
- assert td // offsets.Minute(2) == 92
-
- def test_td_floordiv_invalid_scalar(self):
+ def test_rfloordiv_numeric_series(self):
# GH#18846
- td = Timedelta(hours=3, minutes=4)
+ td = Timedelta(hours=3, minutes=3)
+ ser = pd.Series([1], dtype=np.int64)
+ res = td.__rfloordiv__(ser)
+ assert res is NotImplemented
- msg = "|".join(
- [
- r"Invalid dtype datetime64\[D\] for __floordiv__",
- "'dtype' is an invalid keyword argument for this function",
- r"ufunc '?floor_divide'? cannot use operands with types",
- ]
- )
+ msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
- td // np.datetime64("2016-01-01", dtype="datetime64[us]")
+ # Deprecated GH#19761, enforced GH#29797
+ ser // td
- def test_td_floordiv_numeric_scalar(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=4)
+ def test_rfloordiv_intarray(self):
+ # deprecated GH#19761, enforced GH#29797
+ ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10**9
- expected = Timedelta(hours=1, minutes=32)
- assert td // 2 == expected
- assert td // 2.0 == expected
- assert td // np.float64(2.0) == expected
- assert td // np.int32(2.0) == expected
- assert td // np.uint8(2.0) == expected
+ msg = "Invalid dtype"
+ with pytest.raises(TypeError, match=msg):
+ ints // Timedelta(1, unit="s")
- def test_td_floordiv_timedeltalike_array(self):
+ def test_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
@@ -605,77 +767,7 @@ def test_td_floordiv_timedeltalike_array(self):
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
- def test_td_floordiv_numeric_series(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=4)
- ser = pd.Series([1], dtype=np.int64)
- res = td // ser
- assert res.dtype.kind == "m"
-
- # ---------------------------------------------------------------
- # Timedelta.__rfloordiv__
-
- def test_td_rfloordiv_timedeltalike_scalar(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=3)
- scalar = Timedelta(hours=3, minutes=4)
-
- # scalar others
- # x // Timedelta is defined only for timedelta-like x. int-like,
- # float-like, and date-like, in particular, should all either
- # a) raise TypeError directly or
- # b) return NotImplemented, following which the reversed
- # operation will raise TypeError.
- assert td.__rfloordiv__(scalar) == 1
- assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
- assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
-
- def test_td_rfloordiv_null_scalar(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=3)
-
- assert np.isnan(td.__rfloordiv__(NaT))
- assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
-
- def test_td_rfloordiv_offsets(self):
- # GH#19738
- assert offsets.Hour(1) // Timedelta(minutes=25) == 2
-
- def test_td_rfloordiv_invalid_scalar(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=3)
-
- dt64 = np.datetime64("2016-01-01", "us")
-
- assert td.__rfloordiv__(dt64) is NotImplemented
-
- msg = (
- r"unsupported operand type\(s\) for //: 'numpy.datetime64' and 'Timedelta'"
- )
- with pytest.raises(TypeError, match=msg):
- dt64 // td
-
- def test_td_rfloordiv_numeric_scalar(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=3)
-
- assert td.__rfloordiv__(np.nan) is NotImplemented
- assert td.__rfloordiv__(3.5) is NotImplemented
- assert td.__rfloordiv__(2) is NotImplemented
- assert td.__rfloordiv__(np.float64(2.0)) is NotImplemented
- assert td.__rfloordiv__(np.uint8(9)) is NotImplemented
- assert td.__rfloordiv__(np.int32(2.0)) is NotImplemented
-
- msg = r"unsupported operand type\(s\) for //: '.*' and 'Timedelta"
- with pytest.raises(TypeError, match=msg):
- np.float64(2.0) // td
- with pytest.raises(TypeError, match=msg):
- np.uint8(9) // td
- with pytest.raises(TypeError, match=msg):
- # deprecated GH#19761, enforced GH#29797
- np.int32(2.0) // td
-
- def test_td_rfloordiv_timedeltalike_array(self):
+ def test_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
@@ -692,230 +784,11 @@ def test_td_rfloordiv_timedeltalike_array(self):
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
- def test_td_rfloordiv_intarray(self):
- # deprecated GH#19761, enforced GH#29797
- ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10**9
-
- msg = "Invalid dtype"
- with pytest.raises(TypeError, match=msg):
- ints // Timedelta(1, unit="s")
-
- def test_td_rfloordiv_numeric_series(self):
- # GH#18846
- td = Timedelta(hours=3, minutes=3)
- ser = pd.Series([1], dtype=np.int64)
- res = td.__rfloordiv__(ser)
- assert res is NotImplemented
-
- msg = "Invalid dtype"
- with pytest.raises(TypeError, match=msg):
- # Deprecated GH#19761, enforced GH#29797
- ser // td
-
- # ----------------------------------------------------------------
- # Timedelta.__mod__, __rmod__
-
- def test_mod_timedeltalike(self):
- # GH#19365
- td = Timedelta(hours=37)
-
- # Timedelta-like others
- result = td % Timedelta(hours=6)
- assert isinstance(result, Timedelta)
- assert result == Timedelta(hours=1)
-
- result = td % timedelta(minutes=60)
- assert isinstance(result, Timedelta)
- assert result == Timedelta(0)
-
- result = td % NaT
- assert result is NaT
-
- def test_mod_timedelta64_nat(self):
- # GH#19365
- td = Timedelta(hours=37)
-
- result = td % np.timedelta64("NaT", "ns")
- assert result is NaT
-
- def test_mod_timedelta64(self):
- # GH#19365
- td = Timedelta(hours=37)
-
- result = td % np.timedelta64(2, "h")
- assert isinstance(result, Timedelta)
- assert result == Timedelta(hours=1)
-
- def test_mod_offset(self):
- # GH#19365
- td = Timedelta(hours=37)
-
- result = td % offsets.Hour(5)
- assert isinstance(result, Timedelta)
- assert result == Timedelta(hours=2)
-
- def test_mod_numeric(self):
- # GH#19365
- td = Timedelta(hours=37)
-
- # Numeric Others
- result = td % 2
- assert isinstance(result, Timedelta)
- assert result == Timedelta(0)
-
- result = td % 1e12
- assert isinstance(result, Timedelta)
- assert result == Timedelta(minutes=3, seconds=20)
-
- result = td % int(1e12)
- assert isinstance(result, Timedelta)
- assert result == Timedelta(minutes=3, seconds=20)
-
- def test_mod_invalid(self):
- # GH#19365
- td = Timedelta(hours=37)
- msg = "unsupported operand type"
- with pytest.raises(TypeError, match=msg):
- td % Timestamp("2018-01-22")
-
- with pytest.raises(TypeError, match=msg):
- td % []
-
- def test_rmod_pytimedelta(self):
- # GH#19365
- td = Timedelta(minutes=3)
-
- result = timedelta(minutes=4) % td
- assert isinstance(result, Timedelta)
- assert result == Timedelta(minutes=1)
-
- def test_rmod_timedelta64(self):
- # GH#19365
- td = Timedelta(minutes=3)
- result = np.timedelta64(5, "m") % td
- assert isinstance(result, Timedelta)
- assert result == Timedelta(minutes=2)
-
- def test_rmod_invalid(self):
- # GH#19365
- td = Timedelta(minutes=3)
-
- msg = "unsupported operand"
- with pytest.raises(TypeError, match=msg):
- Timestamp("2018-01-22") % td
-
- with pytest.raises(TypeError, match=msg):
- 15 % td
-
- with pytest.raises(TypeError, match=msg):
- 16.0 % td
-
- msg = "Invalid dtype int"
- with pytest.raises(TypeError, match=msg):
- np.array([22, 24]) % td
-
- # ----------------------------------------------------------------
- # Timedelta.__divmod__, __rdivmod__
-
- def test_divmod_numeric(self):
- # GH#19365
- td = Timedelta(days=2, hours=6)
-
- result = divmod(td, 53 * 3600 * 1e9)
- assert result[0] == Timedelta(1, unit="ns")
- assert isinstance(result[1], Timedelta)
- assert result[1] == Timedelta(hours=1)
-
- assert result
- result = divmod(td, np.nan)
- assert result[0] is NaT
- assert result[1] is NaT
+ def test_na(self):
+ ...
- def test_divmod(self):
- # GH#19365
- td = Timedelta(days=2, hours=6)
- result = divmod(td, timedelta(days=1))
- assert result[0] == 2
- assert isinstance(result[1], Timedelta)
- assert result[1] == Timedelta(hours=6)
-
- result = divmod(td, 54)
- assert result[0] == Timedelta(hours=1)
- assert isinstance(result[1], Timedelta)
- assert result[1] == Timedelta(0)
-
- result = divmod(td, NaT)
- assert np.isnan(result[0])
- assert result[1] is NaT
-
- def test_divmod_offset(self):
- # GH#19365
- td = Timedelta(days=2, hours=6)
-
- result = divmod(td, offsets.Hour(-4))
- assert result[0] == -14
- assert isinstance(result[1], Timedelta)
- assert result[1] == Timedelta(hours=-2)
-
- def test_divmod_invalid(self):
- # GH#19365
- td = Timedelta(days=2, hours=6)
-
- msg = r"unsupported operand type\(s\) for //: 'Timedelta' and 'Timestamp'"
- with pytest.raises(TypeError, match=msg):
- divmod(td, Timestamp("2018-01-22"))
-
- def test_rdivmod_pytimedelta(self):
- # GH#19365
- result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
- assert result[0] == 2
- assert isinstance(result[1], Timedelta)
- assert result[1] == Timedelta(hours=6)
-
- def test_rdivmod_offset(self):
- result = divmod(offsets.Hour(54), Timedelta(hours=-4))
- assert result[0] == -14
- assert isinstance(result[1], Timedelta)
- assert result[1] == Timedelta(hours=-2)
-
- def test_rdivmod_invalid(self):
- # GH#19365
- td = Timedelta(minutes=3)
- msg = "unsupported operand type"
-
- with pytest.raises(TypeError, match=msg):
- divmod(Timestamp("2018-01-22"), td)
-
- with pytest.raises(TypeError, match=msg):
- divmod(15, td)
-
- with pytest.raises(TypeError, match=msg):
- divmod(16.0, td)
-
- msg = "Invalid dtype int"
- with pytest.raises(TypeError, match=msg):
- divmod(np.array([22, 24]), td)
-
- # ----------------------------------------------------------------
-
- @pytest.mark.parametrize(
- "op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub]
- )
- @pytest.mark.parametrize(
- "arr",
- [
- np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]),
- np.array([Timestamp("2021-11-09 09:54:00"), Timedelta("1D")]),
- ],
- )
- def test_td_op_timedelta_timedeltalike_array(self, op, arr):
- msg = "unsupported operand type|cannot use operands with types"
- with pytest.raises(TypeError, match=msg):
- op(arr, Timedelta("1D"))
-
-
-class TestTimedeltaComparison:
+class TestComparison:
def test_compare_tick(self, tick_classes):
cls = tick_classes
@@ -1029,40 +902,5 @@ def test_compare_unknown_type(self, val):
with pytest.raises(TypeError, match=msg):
t < val
-
-def test_ops_notimplemented():
- class Other:
+ def test_na(self):
pass
-
- other = Other()
-
- td = Timedelta("1 day")
- assert td.__add__(other) is NotImplemented
- assert td.__sub__(other) is NotImplemented
- assert td.__truediv__(other) is NotImplemented
- assert td.__mul__(other) is NotImplemented
- assert td.__floordiv__(other) is NotImplemented
-
-
-def test_ops_error_str():
- # GH#13624
- td = Timedelta("1 day")
-
- for left, right in [(td, "a"), ("a", td)]:
-
- msg = "|".join(
- [
- "unsupported operand type",
- r'can only concatenate str \(not "Timedelta"\) to str',
- "must be str, not Timedelta",
- ]
- )
- with pytest.raises(TypeError, match=msg):
- left + right
-
- msg = "not supported between instances of"
- with pytest.raises(TypeError, match=msg):
- left > right
-
- assert not left == right
- assert left != right
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
deleted file mode 100644
index 7fc7bd3a5a74d..0000000000000
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ /dev/null
@@ -1,394 +0,0 @@
-from datetime import timedelta
-from itertools import product
-
-import numpy as np
-import pytest
-
-from pandas._libs.tslibs import OutOfBoundsTimedelta
-
-from pandas import (
- Timedelta,
- offsets,
- to_timedelta,
-)
-
-
-def test_construct_from_td64_with_unit():
- # ignore the unit, as it may cause silently overflows leading to incorrect
- # results, and in non-overflow cases is irrelevant GH#46827
- obj = np.timedelta64(123456789, "h")
-
- with pytest.raises(OutOfBoundsTimedelta, match="123456789 hours"):
- Timedelta(obj, unit="ps")
-
- with pytest.raises(OutOfBoundsTimedelta, match="123456789 hours"):
- Timedelta(obj, unit="ns")
-
- with pytest.raises(OutOfBoundsTimedelta, match="123456789 hours"):
- Timedelta(obj)
-
-
-def test_construction():
- expected = np.timedelta64(10, "D").astype("m8[ns]").view("i8")
- assert Timedelta(10, unit="d").value == expected
- assert Timedelta(10.0, unit="d").value == expected
- assert Timedelta("10 days").value == expected
- assert Timedelta(days=10).value == expected
- assert Timedelta(days=10.0).value == expected
-
- expected += np.timedelta64(10, "s").astype("m8[ns]").view("i8")
- assert Timedelta("10 days 00:00:10").value == expected
- assert Timedelta(days=10, seconds=10).value == expected
- assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
- assert Timedelta(days=10, microseconds=10 * 1000 * 1000).value == expected
-
- # rounding cases
- assert Timedelta(82739999850000).value == 82739999850000
- assert "0 days 22:58:59.999850" in str(Timedelta(82739999850000))
- assert Timedelta(123072001000000).value == 123072001000000
- assert "1 days 10:11:12.001" in str(Timedelta(123072001000000))
-
- # string conversion with/without leading zero
- # GH#9570
- assert Timedelta("0:00:00") == timedelta(hours=0)
- assert Timedelta("00:00:00") == timedelta(hours=0)
- assert Timedelta("-1:00:00") == -timedelta(hours=1)
- assert Timedelta("-01:00:00") == -timedelta(hours=1)
-
- # more strings & abbrevs
- # GH#8190
- assert Timedelta("1 h") == timedelta(hours=1)
- assert Timedelta("1 hour") == timedelta(hours=1)
- assert Timedelta("1 hr") == timedelta(hours=1)
- assert Timedelta("1 hours") == timedelta(hours=1)
- assert Timedelta("-1 hours") == -timedelta(hours=1)
- assert Timedelta("1 m") == timedelta(minutes=1)
- assert Timedelta("1.5 m") == timedelta(seconds=90)
- assert Timedelta("1 minute") == timedelta(minutes=1)
- assert Timedelta("1 minutes") == timedelta(minutes=1)
- assert Timedelta("1 s") == timedelta(seconds=1)
- assert Timedelta("1 second") == timedelta(seconds=1)
- assert Timedelta("1 seconds") == timedelta(seconds=1)
- assert Timedelta("1 ms") == timedelta(milliseconds=1)
- assert Timedelta("1 milli") == timedelta(milliseconds=1)
- assert Timedelta("1 millisecond") == timedelta(milliseconds=1)
- assert Timedelta("1 us") == timedelta(microseconds=1)
- assert Timedelta("1 µs") == timedelta(microseconds=1)
- assert Timedelta("1 micros") == timedelta(microseconds=1)
- assert Timedelta("1 microsecond") == timedelta(microseconds=1)
- assert Timedelta("1.5 microsecond") == Timedelta("00:00:00.000001500")
- assert Timedelta("1 ns") == Timedelta("00:00:00.000000001")
- assert Timedelta("1 nano") == Timedelta("00:00:00.000000001")
- assert Timedelta("1 nanosecond") == Timedelta("00:00:00.000000001")
-
- # combos
- assert Timedelta("10 days 1 hour") == timedelta(days=10, hours=1)
- assert Timedelta("10 days 1 h") == timedelta(days=10, hours=1)
- assert Timedelta("10 days 1 h 1m 1s") == timedelta(
- days=10, hours=1, minutes=1, seconds=1
- )
- assert Timedelta("-10 days 1 h 1m 1s") == -timedelta(
- days=10, hours=1, minutes=1, seconds=1
- )
- assert Timedelta("-10 days 1 h 1m 1s") == -timedelta(
- days=10, hours=1, minutes=1, seconds=1
- )
- assert Timedelta("-10 days 1 h 1m 1s 3us") == -timedelta(
- days=10, hours=1, minutes=1, seconds=1, microseconds=3
- )
- assert Timedelta("-10 days 1 h 1.5m 1s 3us") == -timedelta(
- days=10, hours=1, minutes=1, seconds=31, microseconds=3
- )
-
- # Currently invalid as it has a - on the hh:mm:dd part
- # (only allowed on the days)
- msg = "only leading negative signs are allowed"
- with pytest.raises(ValueError, match=msg):
- Timedelta("-10 days -1 h 1.5m 1s 3us")
-
- # only leading neg signs are allowed
- with pytest.raises(ValueError, match=msg):
- Timedelta("10 days -1 h 1.5m 1s 3us")
-
- # no units specified
- msg = "no units specified"
- with pytest.raises(ValueError, match=msg):
- Timedelta("3.1415")
-
- # invalid construction
- msg = "cannot construct a Timedelta"
- with pytest.raises(ValueError, match=msg):
- Timedelta()
-
- msg = "unit abbreviation w/o a number"
- with pytest.raises(ValueError, match=msg):
- Timedelta("foo")
-
- msg = (
- "cannot construct a Timedelta from "
- "the passed arguments, allowed keywords are "
- )
- with pytest.raises(ValueError, match=msg):
- Timedelta(day=10)
-
- # floats
- expected = np.timedelta64(10, "s").astype("m8[ns]").view("i8") + np.timedelta64(
- 500, "ms"
- ).astype("m8[ns]").view("i8")
- assert Timedelta(10.5, unit="s").value == expected
-
- # offset
- assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2)
- assert Timedelta(offsets.Hour(2)) == Timedelta(hours=2)
- assert Timedelta(offsets.Second(2)) == Timedelta(seconds=2)
-
- # GH#11995: unicode
- expected = Timedelta("1H")
- result = Timedelta("1H")
- assert result == expected
- assert to_timedelta(offsets.Hour(2)) == Timedelta("0 days, 02:00:00")
-
- msg = "unit abbreviation w/o a number"
- with pytest.raises(ValueError, match=msg):
- Timedelta("foo bar")
-
-
-@pytest.mark.parametrize(
- "item",
- list(
- {
- "days": "D",
- "seconds": "s",
- "microseconds": "us",
- "milliseconds": "ms",
- "minutes": "m",
- "hours": "h",
- "weeks": "W",
- }.items()
- ),
-)
-@pytest.mark.parametrize(
- "npdtype", [np.int64, np.int32, np.int16, np.float64, np.float32, np.float16]
-)
-def test_td_construction_with_np_dtypes(npdtype, item):
- # GH#8757: test construction with np dtypes
- pykwarg, npkwarg = item
- expected = np.timedelta64(1, npkwarg).astype("m8[ns]").view("i8")
- assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
-
-
-@pytest.mark.parametrize(
- "val",
- [
- "1s",
- "-1s",
- "1us",
- "-1us",
- "1 day",
- "-1 day",
- "-23:59:59.999999",
- "-1 days +23:59:59.999999",
- "-1ns",
- "1ns",
- "-23:59:59.999999999",
- ],
-)
-def test_td_from_repr_roundtrip(val):
- # round-trip both for string and value
- td = Timedelta(val)
- assert Timedelta(td.value) == td
-
- assert Timedelta(str(td)) == td
- assert Timedelta(td._repr_base(format="all")) == td
- assert Timedelta(td._repr_base()) == td
-
-
-def test_overflow_on_construction():
- msg = "int too (large|big) to convert"
-
- # GH#3374
- value = Timedelta("1day").value * 20169940
- with pytest.raises(OverflowError, match=msg):
- Timedelta(value)
-
- # xref GH#17637
- with pytest.raises(OverflowError, match=msg):
- Timedelta(7 * 19999, unit="D")
-
- with pytest.raises(OutOfBoundsTimedelta, match=msg):
- Timedelta(timedelta(days=13 * 19999))
-
-
-@pytest.mark.parametrize(
- "val, unit, name",
- [
- (3508, "M", " months"),
- (15251, "W", " weeks"), # 1
- (106752, "D", " days"), # change from previous:
- (2562048, "h", " hours"), # 0 hours
- (153722868, "m", " minutes"), # 13 minutes
- (9223372037, "s", " seconds"), # 44 seconds
- ],
-)
-def test_construction_out_of_bounds_td64(val, unit, name):
- # TODO: parametrize over units just above/below the implementation bounds
- # once GH#38964 is resolved
-
- # Timedelta.max is just under 106752 days
- td64 = np.timedelta64(val, unit)
- assert td64.astype("m8[ns]").view("i8") < 0 # i.e. naive astype will be wrong
-
- msg = str(val) + name
- with pytest.raises(OutOfBoundsTimedelta, match=msg):
- Timedelta(td64)
-
- # But just back in bounds and we are OK
- assert Timedelta(td64 - 1) == td64 - 1
-
- td64 *= -1
- assert td64.astype("m8[ns]").view("i8") > 0 # i.e. naive astype will be wrong
-
- with pytest.raises(OutOfBoundsTimedelta, match="-" + msg):
- Timedelta(td64)
-
- # But just back in bounds and we are OK
- assert Timedelta(td64 + 1) == td64 + 1
-
-
-@pytest.mark.parametrize(
- "fmt,exp",
- [
- (
- "P6DT0H50M3.010010012S",
- Timedelta(
- days=6,
- minutes=50,
- seconds=3,
- milliseconds=10,
- microseconds=10,
- nanoseconds=12,
- ),
- ),
- (
- "P-6DT0H50M3.010010012S",
- Timedelta(
- days=-6,
- minutes=50,
- seconds=3,
- milliseconds=10,
- microseconds=10,
- nanoseconds=12,
- ),
- ),
- ("P4DT12H30M5S", Timedelta(days=4, hours=12, minutes=30, seconds=5)),
- ("P0DT0H0M0.000000123S", Timedelta(nanoseconds=123)),
- ("P0DT0H0M0.00001S", Timedelta(microseconds=10)),
- ("P0DT0H0M0.001S", Timedelta(milliseconds=1)),
- ("P0DT0H1M0S", Timedelta(minutes=1)),
- ("P1DT25H61M61S", Timedelta(days=1, hours=25, minutes=61, seconds=61)),
- ("PT1S", Timedelta(seconds=1)),
- ("PT0S", Timedelta(seconds=0)),
- ("P1WT0S", Timedelta(days=7, seconds=0)),
- ("P1D", Timedelta(days=1)),
- ("P1DT1H", Timedelta(days=1, hours=1)),
- ("P1W", Timedelta(days=7)),
- ("PT300S", Timedelta(seconds=300)),
- ("P1DT0H0M00000000000S", Timedelta(days=1)),
- ("PT-6H3M", Timedelta(hours=-6, minutes=3)),
- ("-PT6H3M", Timedelta(hours=-6, minutes=-3)),
- ("-PT-6H+3M", Timedelta(hours=6, minutes=-3)),
- ],
-)
-def test_iso_constructor(fmt, exp):
- assert Timedelta(fmt) == exp
-
-
-@pytest.mark.parametrize(
- "fmt",
- [
- "PPPPPPPPPPPP",
- "PDTHMS",
- "P0DT999H999M999S",
- "P1DT0H0M0.0000000000000S",
- "P1DT0H0M0.S",
- "P",
- "-P",
- ],
-)
-def test_iso_constructor_raises(fmt):
- msg = f"Invalid ISO 8601 Duration format - {fmt}"
- with pytest.raises(ValueError, match=msg):
- Timedelta(fmt)
-
-
-@pytest.mark.parametrize(
- "constructed_td, conversion",
- [
- (Timedelta(nanoseconds=100), "100ns"),
- (
- Timedelta(
- days=1,
- hours=1,
- minutes=1,
- weeks=1,
- seconds=1,
- milliseconds=1,
- microseconds=1,
- nanoseconds=1,
- ),
- 694861001001001,
- ),
- (Timedelta(microseconds=1) + Timedelta(nanoseconds=1), "1us1ns"),
- (Timedelta(microseconds=1) - Timedelta(nanoseconds=1), "999ns"),
- (Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), "990ns"),
- ],
-)
-def test_td_constructor_on_nanoseconds(constructed_td, conversion):
- # GH#9273
- assert constructed_td == Timedelta(conversion)
-
-
-def test_td_constructor_value_error():
- msg = "Invalid type <class 'str'>. Must be int or float."
- with pytest.raises(TypeError, match=msg):
- Timedelta(nanoseconds="abc")
-
-
-def test_timedelta_constructor_identity():
- # Test for #30543
- expected = Timedelta(np.timedelta64(1, "s"))
- result = Timedelta(expected)
- assert result is expected
-
-
-@pytest.mark.parametrize(
- "constructor, value, unit, expectation",
- [
- (Timedelta, "10s", "ms", (ValueError, "unit must not be specified")),
- (to_timedelta, "10s", "ms", (ValueError, "unit must not be specified")),
- (to_timedelta, ["1", 2, 3], "s", (ValueError, "unit must not be specified")),
- ],
-)
-def test_string_with_unit(constructor, value, unit, expectation):
- exp, match = expectation
- with pytest.raises(exp, match=match):
- _ = constructor(value, unit=unit)
-
-
-@pytest.mark.parametrize(
- "value",
- [
- "".join(elements)
- for repetition in (1, 2)
- for elements in product("+-, ", repeat=repetition)
- ],
-)
-def test_string_without_numbers(value):
- # GH39710 Timedelta input string with only symbols and no digits raises an error
- msg = (
- "symbols w/o a number"
- if value != "--"
- else "only leading negative signs are allowed"
- )
- with pytest.raises(ValueError, match=msg):
- Timedelta(value)
diff --git a/pandas/tests/scalar/timedelta/test_formats.py b/pandas/tests/scalar/timedelta/test_formats.py
deleted file mode 100644
index 753186ee4b738..0000000000000
--- a/pandas/tests/scalar/timedelta/test_formats.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import pytest
-
-from pandas import Timedelta
-
-
-@pytest.mark.parametrize(
- "td, expected_repr",
- [
- (Timedelta(10, unit="d"), "Timedelta('10 days 00:00:00')"),
- (Timedelta(10, unit="s"), "Timedelta('0 days 00:00:10')"),
- (Timedelta(10, unit="ms"), "Timedelta('0 days 00:00:00.010000')"),
- (Timedelta(-10, unit="ms"), "Timedelta('-1 days +23:59:59.990000')"),
- ],
-)
-def test_repr(td, expected_repr):
- assert repr(td) == expected_repr
-
-
-@pytest.mark.parametrize(
- "td, expected_iso",
- [
- (
- Timedelta(
- days=6,
- minutes=50,
- seconds=3,
- milliseconds=10,
- microseconds=10,
- nanoseconds=12,
- ),
- "P6DT0H50M3.010010012S",
- ),
- (Timedelta(days=4, hours=12, minutes=30, seconds=5), "P4DT12H30M5S"),
- (Timedelta(nanoseconds=123), "P0DT0H0M0.000000123S"),
- # trim nano
- (Timedelta(microseconds=10), "P0DT0H0M0.00001S"),
- # trim micro
- (Timedelta(milliseconds=1), "P0DT0H0M0.001S"),
- # don't strip every 0
- (Timedelta(minutes=1), "P0DT0H1M0S"),
- ],
-)
-def test_isoformat(td, expected_iso):
- assert td.isoformat() == expected_iso
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index cf7211e82b799..5843b1617f3fe 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -1,5 +1,19 @@
-""" test the scalar Timedelta """
+"""
+Tests against (most of) the public Timedelta API.
+
+See test_arithmetic.py, in this same directory, for tests of binary ops between a
+Timedelta scalar and another scalar or a Array/Index/Series/DataFrame.
+"""
+
+from __future__ import annotations
+
from datetime import timedelta
+from itertools import (
+ chain,
+ product,
+ zip_longest,
+)
+import operator
from hypothesis import (
given,
@@ -10,12 +24,14 @@
from pandas._libs import lib
from pandas._libs.tslibs import (
- NaT,
+ OutOfBoundsTimedelta,
iNaT,
)
import pandas as pd
from pandas import (
+ NA,
+ NaT,
Timedelta,
TimedeltaIndex,
offsets,
@@ -23,82 +39,598 @@
)
import pandas._testing as tm
+TD_UNITS = (
+ ("n", "ns", "nano", "nanos", "nanosecond", "nanoseconds"),
+ ("u", "us", "µs", "micro", "micros", "microsecond", "microseconds"),
+ ("l", "ms", "milli", "millis", "millisecond", "milliseconds"),
+ ("s", "sec", "second", "seconds"),
+ ("m", "t", "min", "minute", "minutes"),
+ ("h", "hr", "hour", "hours"),
+ ("d", "day", "days"),
+ ("w",),
+)
+TD_UNITS_UNIQUE = tuple(map(operator.itemgetter(0), TD_UNITS))
+TD_KWARGS = (
+ "nanoseconds",
+ "microseconds",
+ "milliseconds",
+ "seconds",
+ "minutes",
+ "hours",
+ "days",
+ "weeks",
+)
+TD_COMPONENTS = tuple(reversed(TD_KWARGS[:-1]))
+TD64_UNITS = ("ns", "us", "ms", "s", "m", "h", "D", "W")
+
+TD_KWARGS_TD_UNITS = dict(zip(TD_KWARGS, TD_UNITS))
+TD_UNITS_TD_KWARGS = dict(
+ chain.from_iterable(
+ zip_longest(units, (kwarg,), fillvalue=kwarg)
+ for kwarg, units in TD_KWARGS_TD_UNITS.items()
+ )
+)
+TD_KWARGS_TD64_UNITS = dict(zip(TD_KWARGS, TD64_UNITS))
+TD_UNITS_TD64_UNITS = dict(
+ chain.from_iterable(
+ zip_longest(td_units, (TD64_UNITS[i],), fillvalue=TD64_UNITS[i])
+ for i, td_units in enumerate(TD_UNITS)
+ )
+)
+
+TD_MAX_PER_KWARG = {
+ "nanoseconds": Timedelta.max.value,
+ "microseconds": Timedelta.max.value // 1_000,
+ "milliseconds": Timedelta.max.value // 1_000_000,
+ "seconds": Timedelta.max.value // 1_000_000_000,
+ "minutes": Timedelta.max.value // (1_000_000_000 * 60),
+ "hours": Timedelta.max.value // (1_000_000_000 * 60 * 60),
+ "days": Timedelta.max.value // (1_000_000_000 * 60 * 60 * 24),
+ "weeks": Timedelta.max.value // (1_000_000_000 * 60 * 60 * 24 * 7),
+}
+TD_MIN_PER_KWARG = {
+ "nanoseconds": Timedelta.min.value,
+ "microseconds": Timedelta.min.value // 1_000 + 1,
+ "milliseconds": Timedelta.min.value // 1_000_000 + 1,
+ "seconds": Timedelta.min.value // 1_000_000_000 + 1,
+ "minutes": Timedelta.min.value // (1_000_000_000 * 60) + 1,
+ "hours": Timedelta.min.value // (1_000_000_000 * 60 * 60) + 1,
+ "days": Timedelta.min.value // (1_000_000_000 * 60 * 60 * 24) + 1,
+ "weeks": Timedelta.min.value // (1_000_000_000 * 60 * 60 * 24 * 7) + 1,
+}
+# simplified to include only one key corresponding to each unit
+TD_MAX_PER_UNIT = dict(zip(TD_UNITS_UNIQUE, TD_MAX_PER_KWARG.values()))
+TD_MIN_PER_UNIT = dict(zip(TD_UNITS_UNIQUE, TD_MIN_PER_KWARG.values()))
+TD64_MAX_PER_UNIT = dict(zip(TD64_UNITS, TD_MAX_PER_KWARG.values()))
+TD64_MIN_PER_UNIT = dict(zip(TD64_UNITS, TD_MIN_PER_KWARG.values()))
+
+xfail_does_not_raise = pytest.mark.xfail(
+ reason="does not raise",
+ raises=pytest.fail.Exception,
+ strict=True,
+)
+skip_ns = lambda s: (u for u in s if not u.startswith("n"))
+
+
+@pytest.fixture(name="timedelta_kwarg", params=skip_ns(TD_KWARGS))
+def fixture_timedelta_kwarg(request) -> str:
+ return request.param
+
+
+@pytest.fixture(name="td_max_per_unit", params=TD_MAX_PER_UNIT)
+def fixture_td_max_per_unit(request) -> tuple:
+ unit = request.param
+ if request.cls is TestOverflow and unit == "w":
+ request.applymarker(xfail_does_not_raise)
+
+ return unit, TD_MAX_PER_UNIT[unit]
+
+
+@pytest.fixture(name="td_min_per_unit", params=TD_MIN_PER_UNIT)
+def fixture_td_min_per_unit(request) -> tuple:
+ unit = request.param
+ if request.cls is TestOverflow and unit == "w":
+ request.applymarker(xfail_does_not_raise)
+
+ return unit, TD_MIN_PER_UNIT[unit]
+
+
+@pytest.fixture(name="td_max_per_kwarg", params=TD_MAX_PER_KWARG)
+def fixture_td_max_per_kwarg(request) -> tuple:
+ kwarg = request.param
+ return kwarg, TD_MAX_PER_KWARG[kwarg]
+
+
+@pytest.fixture(name="td_min_per_kwarg", params=TD_MIN_PER_KWARG)
+def fixture_td_min_per_kwarg(request) -> tuple:
+ kwarg = request.param
+ return kwarg, TD_MIN_PER_KWARG[kwarg]
+
+
+@pytest.fixture(name="td64_max_per_unit", params=skip_ns(TD64_MAX_PER_UNIT))
+def fixture_td64_max_per_unit(request) -> tuple:
+ unit = request.param
+ return unit, TD64_MAX_PER_UNIT[unit]
+
+
+@pytest.fixture(name="td64_min_per_unit", params=skip_ns(TD64_MIN_PER_UNIT))
+def fixture_td64_min_per_unit(request) -> tuple:
+ unit = request.param
+ return unit, TD64_MIN_PER_UNIT[unit]
+
+
+@pytest.fixture(name="non_nano_reso", params=(7, 8, 9))
+def fixture_non_nano_reso(request):
+ """7, 8, 9 correspond to second, millisecond, and microsecond, respectively"""
+ return request.param
+
+
+@pytest.fixture(name="non_nano_td")
+def fixture_non_nano_td(non_nano_reso: int) -> Timedelta:
+ # microsecond that would be just out of bounds for nano
+ us = np.int64((TD_MAX_PER_KWARG["days"] + 1) * 86_400 * 1_000_000)
+ values = {
+ 9: us,
+ 8: us // 1000,
+ 7: us // 1_000_000,
+ }
+
+ return Timedelta._from_value_and_reso(values[non_nano_reso], non_nano_reso)
+
+
+class TestConstruction:
+ """
+ Tests of the public constructor, Timedelta.__new__().
+ """
+
+ def test_type(self):
+ td = Timedelta(1)
+
+ assert isinstance(td, Timedelta)
+ assert isinstance(td, timedelta)
+
+ @pytest.mark.parametrize("td_unit, td64_unit", TD_UNITS_TD64_UNITS.items())
+ def test_from_value_and_unit(
+ self,
+ td_unit: str,
+ td64_unit: str,
+ any_real_numpy_dtype: str,
+ ):
+ """GH#8757: test construction with np dtypes"""
+ expected_ns = np.timedelta64(1, td64_unit).astype("m8[ns]").view("i8")
+ one = np.dtype(any_real_numpy_dtype).type(1)
+ td = Timedelta(one, td_unit)
+
+ assert td.value == expected_ns
+
+ @pytest.mark.parametrize("subset", map(slice, range(1, len(TD_UNITS_UNIQUE))))
+ def test_from_str(self, subset: slice):
+ """GH#8190"""
+ td64s = tuple(np.timedelta64(1, u) for u in TD64_UNITS[subset])
+ str_value = " ".join(tuple(f"1 {u}" for u in TD_UNITS_UNIQUE[subset]))
+ expected_ns = np.sum(td64s).astype("m8[ns]").view("i8")
+ td = Timedelta(str_value)
+ neg_td = Timedelta("-" + str_value)
+
+ assert td.value == expected_ns
+ assert neg_td.value == -1 * expected_ns
+
+ @pytest.mark.parametrize(
+ "value, expected_hours",
+ (
+ ("0:00:00", 0),
+ ("1:00:00", 1),
+ ),
+ )
+ def test_from_str_with_without_leading_zero(self, value: str, expected_hours: int):
+ """GH#9570"""
+ expected_ns = np.timedelta64(expected_hours, "h").astype("m8[ns]").view("i8")
+ td0 = Timedelta(value)
+ td1 = Timedelta("0" + value)
+
+ assert td0.value == expected_ns
+ assert td1.value == expected_ns
+
+ @pytest.mark.parametrize(
+ ("value", "expected"),
+ (
+ (
+ "P6DT0H50M3.010010012S",
+ Timedelta(
+ days=6,
+ minutes=50,
+ seconds=3,
+ milliseconds=10,
+ microseconds=10,
+ nanoseconds=12,
+ ),
+ ),
+ (
+ "P-6DT0H50M3.010010012S",
+ Timedelta(
+ days=-6,
+ minutes=50,
+ seconds=3,
+ milliseconds=10,
+ microseconds=10,
+ nanoseconds=12,
+ ),
+ ),
+ ("P4DT12H30M5S", Timedelta(days=4, hours=12, minutes=30, seconds=5)),
+ ("P0DT0H0M0.000000123S", Timedelta(nanoseconds=123)),
+ ("P0DT0H0M0.00001S", Timedelta(microseconds=10)),
+ ("P0DT0H0M0.001S", Timedelta(milliseconds=1)),
+ ("P0DT0H1M0S", Timedelta(minutes=1)),
+ ("P1DT25H61M61S", Timedelta(days=1, hours=25, minutes=61, seconds=61)),
+ ("PT1S", Timedelta(seconds=1)),
+ ("PT0S", Timedelta(seconds=0)),
+ ("P1WT0S", Timedelta(days=7, seconds=0)),
+ ("P1D", Timedelta(days=1)),
+ ("P1DT1H", Timedelta(days=1, hours=1)),
+ ("P1W", Timedelta(days=7)),
+ ("PT300S", Timedelta(seconds=300)),
+ ("P1DT0H0M00000000000S", Timedelta(days=1)),
+ ("PT-6H3M", Timedelta(hours=-6, minutes=3)),
+ ("-PT6H3M", Timedelta(hours=-6, minutes=-3)),
+ ("-PT-6H+3M", Timedelta(hours=6, minutes=-3)),
+ ),
+ )
+ def test_from_isoformat_str(self, value: str, expected: Timedelta):
+ assert Timedelta(value) == expected
+
+ @pytest.mark.parametrize("subset", map(slice, range(1, len(TD_KWARGS))))
+ def test_from_kwargs(self, subset: slice, any_real_numpy_dtype: str):
+ td64s = tuple(np.timedelta64(1, u) for u in TD64_UNITS[subset])
+ kwargs = {u: np.dtype(any_real_numpy_dtype).type(1) for u in TD_KWARGS[subset]}
+ expected_ns = np.sum(td64s).astype("m8[ns]").view("i8")
+ td = Timedelta(**kwargs)
+
+ assert td.value == expected_ns
+
+ @pytest.mark.parametrize("td_unit, td_kwarg", TD_UNITS_TD_KWARGS.items())
+ def test_kwarg_unit_equivalence(self, request, td_unit: str, td_kwarg: str):
+ if td_kwarg == "weeks":
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="this one isn't valid",
+ raises=ValueError,
+ strict=True,
+ )
+ )
+
+ from_unit = Timedelta(1, td_unit)
+ from_kwarg = Timedelta(**{td_kwarg: 1}) # type: ignore[arg-type]
+ from_str_unit = Timedelta(f"1 {td_unit}")
+ from_str_kwarg = Timedelta(f"1 {td_kwarg}")
+
+ assert from_unit == from_kwarg == from_str_unit == from_str_kwarg
+
+ @pytest.mark.parametrize(
+ "value, td_unit, expected_ns",
+ (
+ (9.123, "us", 9123),
+ (9.123456, "ms", 9123456),
+ (9.123456789, "s", 9123456789),
+ ),
+ )
+ def test_float_values_not_rounded(
+ self,
+ value: float,
+ td_unit: str,
+ expected_ns: int,
+ ):
+ """GH#12690"""
+ td_kwarg = TD_UNITS_TD_KWARGS[td_unit]
+ from_float = Timedelta(value, td_unit)
+ from_str = Timedelta(f"{value} {td_unit}")
+ from_kwarg = Timedelta(**{td_kwarg: value}) # type: ignore[arg-type]
+
+ assert from_float.value == expected_ns
+ assert from_str.value == expected_ns
+ assert from_kwarg.value == expected_ns
+
+ def test_from_offset(self, tick_classes):
+ offset = tick_classes(1)
+ assert Timedelta(offset).value == offset.nanos
+
+ @pytest.mark.parametrize("td_unit", chain.from_iterable(TD_UNITS))
+ def test_from_td64_ignores_unit(self, td_unit: str, td_overflow_msg: str):
+ """
+ Ignore the unit, as it may cause silently overflows leading to incorrect
+ results, and in non-overflow cases is irrelevant GH#46827.
+ """
+ td64 = np.timedelta64(TD64_MAX_PER_UNIT["h"], "h")
+
+ assert Timedelta(td64, td_unit) == Timedelta(td64)
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(td64 * 2, td_unit)
+
+ @pytest.mark.parametrize(
+ ("args", "kwargs"),
+ [
+ ((), {}),
+ (("ns",), {}),
+ (("ms",), {}),
+ ((), {"seconds": 3}),
+ (("ns",), {"minutes": 2}),
+ ],
+ )
+ def test_from_td_ignores_other_args(self, args: tuple, kwargs: dict):
+ original = Timedelta(1)
+ new = Timedelta(original, *args, **kwargs)
+
+ assert new == original
+
+ def test_from_timedelta(self, timedelta_kwarg: str):
+ kwargs = {timedelta_kwarg: 1}
+ assert Timedelta(**kwargs) == timedelta(**kwargs) # type: ignore[arg-type]
+
+ @pytest.mark.parametrize(
+ "value",
+ (
+ None,
+ np.nan,
+ NaT,
+ pytest.param(
+ NA,
+ marks=pytest.mark.xfail(
+ reason="constructor fails",
+ raises=ValueError,
+ strict=True,
+ ),
+ ),
+ ),
+ ids=("None", "np.nan", "pd.NaT", "pd.NA"),
+ )
+ def test_from_na_value_returns_nat(self, value):
+ assert Timedelta(value) is NaT
+
+ def test_raises_if_no_args_passed(self):
+ msg = (
+ "cannot construct a Timedelta without a value/unit or descriptive keywords"
+ )
+
+ with pytest.raises(ValueError, match=msg):
+ Timedelta()
+
+ @pytest.mark.parametrize("unit", ("years", "months", "day", "ps", "reso", "_reso"))
+ def test_raises_for_invalid_kwarg(self, unit: str):
+ msg = "cannot construct a Timedelta from the passed arguments"
+
+ with pytest.raises(ValueError, match=msg):
+ Timedelta(**{unit: 1}) # type: ignore[arg-type]
+
+ def test_raises_if_kwarg_has_str_value(self):
+ msg = "Invalid type <class 'str'>. Must be int or float."
+
+ with pytest.raises(TypeError, match=msg):
+ Timedelta(nanoseconds="1")
+
+ @pytest.mark.parametrize(
+ ("constructor", "value", "unit", "msg"),
+ (
+ (Timedelta, "10s", "ms", "the value is a str"),
+ (to_timedelta, "10s", "ms", "the input is/contains a str"),
+ (to_timedelta, ["1", "2", "3"], "s", "the input contains a str"),
+ ),
+ ids=("Timedelta", "to_timedelta-scalar", "to_timedelta-sequence"),
+ )
+ def test_raises_if_both_str_value_and_unit_passed(
+ self,
+ constructor,
+ value,
+ unit,
+ msg,
+ ):
+ msg = "unit must not be specified if " + msg
+
+ with pytest.raises(ValueError, match=msg):
+ constructor(value, unit=unit)
+
+ @pytest.mark.parametrize(
+ "value",
+ [
+ "PPPPPPPPPPPP",
+ "PDTHMS",
+ "P0DT999H999M999S",
+ "P1DT0H0M0.0000000000000S",
+ "P1DT0H0M0.S",
+ "P",
+ "-P",
+ ],
+ )
+ def test_raises_for_invalid_isolike_str_value(self, value):
+ msg = f"Invalid ISO 8601 Duration format - {value}"
+
+ with pytest.raises(ValueError, match=msg):
+ Timedelta(value)
+
+ def test_raises_if_str_value_contains_no_units(self):
+ msg = "no units specified"
+
+ with pytest.raises(ValueError, match=msg):
+ Timedelta("3.1415")
+
+ @pytest.mark.parametrize(
+ ("value", "msg"),
+ (
+ ("us", "unit abbreviation w/o a number"),
+ ("seconds", "unit abbreviation w/o a number"),
+ ("garbage", "unit abbreviation w/o a number"),
+ # GH39710 Timedelta input string with only symbols and no digits raises
+ ("+", "symbols w/o a number"),
+ ("-", "symbols w/o a number"),
+ ),
+ )
+ def test_raises_if_str_value_has_no_numeric_component(self, value: str, msg: str):
+ with pytest.raises(ValueError, match=msg):
+ Timedelta(value)
+
+ @pytest.mark.parametrize(
+ "value",
+ (
+ "--",
+ # Currently invalid as it has a - on the hh:mm:dd part
+ # (only allowed on the days)
+ "-10 days -1 h 1.5m 1s 3us",
+ "10 days -1 h 1.5m 1s 3us",
+ ),
+ )
+ def test_raises_for_str_value_with_second_minus_sign(self, value: str):
+ msg = "only leading negative signs are allowed"
+ with pytest.raises(ValueError, match=msg):
+ Timedelta(value)
+
+ @pytest.mark.parametrize(
+ ("unit", "func"),
+ product(("Y", "y", "M"), (Timedelta, to_timedelta)),
+ )
+ def test_warns_or_raises_if_ambiguous_unit_passed(self, unit: str, func):
+ msg = "Units 'M', 'Y', and 'y' are no longer supported"
+
+ with pytest.raises(ValueError, match=msg):
+ func(1, unit)
+
+ def test_reso_invariant_if_td_created_via_public_api(self, td_max_per_unit: tuple):
+ unit, max_value = td_max_per_unit
+ td_small = Timedelta(1, unit)
+ td_max = Timedelta(max_value, unit)
+ msg = "attribute '_reso' of 'pandas._libs.tslibs.timedeltas._Timedelta'"
+
+ assert getattr(td_small, "_reso") == 10
+ assert getattr(td_max, "_reso") == 10
+ with pytest.raises(AttributeError, match=msg):
+ setattr(td_max, "_reso", 9)
+
+ def test_reso_configurable_via_private_api(self, non_nano_reso: int):
+ td = Timedelta._from_value_and_reso(np.int64(1), non_nano_reso)
+ assert td.value == 1
+ assert getattr(td, "_reso") == non_nano_reso
+
+
+class TestOverflow:
+ def test_value_unit_too_big(self, td_max_per_unit: tuple, td_overflow_msg: str):
+ unit, value = td_max_per_unit
+
+ assert Timedelta(value, unit) <= Timedelta.max
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(value + 1, unit)
+
+ def test_value_unit_too_small(self, td_min_per_unit: tuple, td_overflow_msg: str):
+ unit, value = td_min_per_unit
+ too_small = value - 1
+
+ assert Timedelta(value, unit) >= Timedelta.min
+ if unit == "n":
+ result = Timedelta(too_small, unit)
+ assert result is NaT # type: ignore[comparison-overlap]
+ too_small -= 1
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(too_small, unit)
+
+ def test_kwarg_too_big(self, td_max_per_kwarg: tuple, td_overflow_msg: str):
+ kwarg, value = td_max_per_kwarg
+
+ assert Timedelta(**{kwarg: value}) <= Timedelta.max
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ assert Timedelta(**{kwarg: value + 1})
+
+ def test_kwarg_too_small(self, td_min_per_kwarg: tuple, td_overflow_msg: str):
+ kwarg, value = td_min_per_kwarg
+ too_small = value - 1
+
+ assert Timedelta(**{kwarg: value}) >= Timedelta.min
+ if kwarg == "nanoseconds":
+ result = Timedelta(**{kwarg: too_small})
+ assert result is NaT # type: ignore[comparison-overlap]
+ too_small -= 1
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(**{kwarg: too_small})
+
+ def test_from_timedelta_too_big(self, timedelta_kwarg: str, td_overflow_msg: str):
+ max_val = TD_MAX_PER_KWARG[timedelta_kwarg]
+
+ assert Timedelta(timedelta(**{timedelta_kwarg: max_val})) <= Timedelta.max
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(timedelta(**{timedelta_kwarg: max_val + 1}))
+
+ def test_from_timedelta_too_small(self, timedelta_kwarg: str, td_overflow_msg: str):
+ min_val = TD_MIN_PER_KWARG[timedelta_kwarg]
+
+ assert Timedelta(timedelta(**{timedelta_kwarg: min_val})) >= Timedelta.min
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(timedelta(**{timedelta_kwarg: min_val - 1}))
+
+ def test_from_td64_too_big(self, td64_max_per_unit: tuple, td_overflow_msg: str):
+ unit, value = td64_max_per_unit
+
+ assert Timedelta(np.timedelta64(value, unit)) <= Timedelta.max
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(np.timedelta64(value + 1, unit))
+
+ def test_from_td64_too_small(self, td64_min_per_unit: tuple, td_overflow_msg: str):
+ unit, value = td64_min_per_unit
+
+ assert Timedelta(np.timedelta64(value, unit)) >= Timedelta.min
+ with pytest.raises(OutOfBoundsTimedelta, match=td_overflow_msg):
+ Timedelta(np.timedelta64(value - 1, unit))
+
class TestNonNano:
- @pytest.fixture(params=[7, 8, 9])
- def unit(self, request):
- # 7, 8, 9 correspond to second, millisecond, and microsecond, respectively
- return request.param
-
- @pytest.fixture
- def val(self, unit):
- # microsecond that would be just out of bounds for nano
- us = 9223372800000000
- if unit == 9:
- value = us
- elif unit == 8:
- value = us // 1000
- else:
- value = us // 1_000_000
- return value
-
- @pytest.fixture
- def td(self, unit, val):
- return Timedelta._from_value_and_reso(val, unit)
-
- def test_from_value_and_reso(self, unit, val):
- # Just checking that the fixture is giving us what we asked for
- td = Timedelta._from_value_and_reso(val, unit)
- assert td.value == val
- assert td._reso == unit
- assert td.days == 106752
-
- def test_unary_non_nano(self, td, unit):
- assert abs(td)._reso == unit
- assert (-td)._reso == unit
- assert (+td)._reso == unit
-
- def test_sub_preserves_reso(self, td, unit):
- res = td - td
- expected = Timedelta._from_value_and_reso(0, unit)
+ """
+ WIP.
+ """
+
+ def test_unary_non_nano(self, non_nano_td, non_nano_reso):
+ assert abs(non_nano_td)._reso == non_nano_reso
+ assert (-non_nano_td)._reso == non_nano_reso
+ assert (+non_nano_td)._reso == non_nano_reso
+
+ def test_sub_preserves_reso(self, non_nano_td, non_nano_reso):
+ res = non_nano_td - non_nano_td
+ expected = Timedelta._from_value_and_reso(0, non_nano_reso)
assert res == expected
- assert res._reso == unit
+ assert res._reso == non_nano_reso
- def test_mul_preserves_reso(self, td, unit):
- # The td fixture should always be far from the implementation
+ @pytest.mark.parametrize("factor", (2, 2.5))
+ def test_mul_preserves_reso(self, non_nano_td, non_nano_reso, factor):
+ # The non_nano_td fixture should always be far from the implementation
# bound, so doubling does not risk overflow.
- res = td * 2
- assert res.value == td.value * 2
- assert res._reso == unit
+ res = non_nano_td * factor
+ assert res.value == non_nano_td.value * factor
+ assert res._reso == non_nano_reso
- def test_cmp_cross_reso(self, td):
+ def test_cmp_cross_reso(self, non_nano_td):
# numpy gets this wrong because of silent overflow
- other = Timedelta(days=106751, unit="ns")
- assert other < td
- assert td > other
- assert not other == td
- assert td != other
-
- def test_to_pytimedelta(self, td):
- res = td.to_pytimedelta()
+ assert Timedelta.max < non_nano_td
+ assert non_nano_td > Timedelta.max
+ assert not Timedelta.max == non_nano_td
+ assert non_nano_td != Timedelta.max
+
+ def test_to_pytimedelta(self, non_nano_td):
+ res = non_nano_td.to_pytimedelta()
expected = timedelta(days=106752)
assert type(res) is timedelta
assert res == expected
- def test_to_timedelta64(self, td, unit):
- for res in [td.to_timedelta64(), td.to_numpy(), td.asm8]:
+ @pytest.mark.parametrize(
+ "converter",
+ (
+ operator.methodcaller("to_timedelta64"),
+ operator.methodcaller("to_numpy"),
+ operator.attrgetter("asm8"),
+ ),
+ )
+ def test_to_timedelta64(self, non_nano_td, converter):
+ td64 = converter(non_nano_td)
+ reso_dtype = {7: "m8[s]", 8: "m8[ms]", 9: "m8[us]"}
- assert isinstance(res, np.timedelta64)
- assert res.view("i8") == td.value
- if unit == 7:
- assert res.dtype == "m8[s]"
- elif unit == 8:
- assert res.dtype == "m8[ms]"
- elif unit == 9:
- assert res.dtype == "m8[us]"
+ assert isinstance(td64, np.timedelta64)
+ assert td64.view("i8") == non_nano_td.value
+ assert td64.dtype == reso_dtype[non_nano_td._reso]
-class TestTimedeltaUnaryOps:
+class TestUnaryOps:
def test_invert(self):
td = Timedelta(10, unit="d")
@@ -128,294 +660,144 @@ def test_unary_ops(self):
assert abs(-td) == Timedelta("10d")
-class TestTimedeltas:
- @pytest.mark.parametrize(
- "unit, value, expected",
- [
- ("us", 9.999, 9999),
- ("ms", 9.999999, 9999999),
- ("s", 9.999999999, 9999999999),
- ],
- )
- def test_rounding_on_int_unit_construction(self, unit, value, expected):
- # GH 12690
- result = Timedelta(value, unit=unit)
- assert result.value == expected
- result = Timedelta(str(value) + unit)
- assert result.value == expected
-
- def test_total_seconds_scalar(self):
- # see gh-10939
- rng = Timedelta("1 days, 10:11:12.100123456")
- expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
- tm.assert_almost_equal(rng.total_seconds(), expt)
+class TestAttributes:
+ def test_min_max_correspond_to_int64_boundaries(self):
+ """GH#12727"""
+ assert Timedelta.min.value == iNaT + 1
+ assert Timedelta.max.value == lib.i8max
- rng = Timedelta(np.nan)
- assert np.isnan(rng.total_seconds())
+ def test_fields(self):
+ """GH#10050: compat with datetime.timedelta; GH#31354"""
+ fields = ("days", "seconds", "microseconds", "nanoseconds")
+ td = Timedelta("1 days, 10:11:12")
- def test_conversion(self):
+ assert td.days == 1
+ assert td.seconds == 10 * 3600 + 11 * 60 + 12
+ assert td.microseconds == 0
+ assert td.nanoseconds == 0
+ assert all(isinstance(v, int) for v in operator.attrgetter(*fields)(td))
+ assert td.days * 24 * 3600 * int(1e9) + td.seconds * int(1e9) == td.value
- for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
- pydt = td.to_pytimedelta()
- assert td == Timedelta(pydt)
- assert td == pydt
- assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
+ @pytest.mark.parametrize("field", ("hours", "minutes", "milliseconds"))
+ def test_fields_not_exposed(self, field: str):
+ msg = f"'Timedelta' object has no attribute '{field}'"
- assert td == np.timedelta64(td.value, "ns")
- td64 = td.to_timedelta64()
+ with pytest.raises(AttributeError, match=msg):
+ getattr(Timedelta.max, field)
- assert td64 == np.timedelta64(td.value, "ns")
- assert td == td64
+ @pytest.mark.parametrize(
+ "td, expected_values",
+ (
+ (Timedelta("-1 us"), (-1, 23, 59, 59, 999, 999, 0)),
+ (Timedelta("-1 days 1 us"), (-2, 23, 59, 59, 999, 999, 0)),
+ ),
+ )
+ def test_components(self, td, expected_values: tuple[int]):
+ values = operator.attrgetter(*TD_COMPONENTS)(td.components)
- assert isinstance(td64, np.timedelta64)
+ assert values == expected_values
+ assert all(isinstance(v, int) for v in values)
- # this is NOT equal and cannot be roundtripped (because of the nanos)
- td = Timedelta("1 days, 10:11:12.012345678")
- assert td != td.to_pytimedelta()
+ def test_resolution_string(self):
+ assert Timedelta(days=1).resolution_string == "D"
+ assert Timedelta(hours=1).resolution_string == "H"
+ assert Timedelta(minutes=1).resolution_string == "T"
+ assert Timedelta(seconds=1).resolution_string == "S"
+ assert Timedelta(milliseconds=1).resolution_string == "L"
+ assert Timedelta(microseconds=1).resolution_string == "U"
+ assert Timedelta(nanoseconds=1).resolution_string == "N"
- def test_fields(self):
- def check(value):
- # that we are int
- assert isinstance(value, int)
-
- # compat to datetime.timedelta
- rng = to_timedelta("1 days, 10:11:12")
- assert rng.days == 1
- assert rng.seconds == 10 * 3600 + 11 * 60 + 12
- assert rng.microseconds == 0
- assert rng.nanoseconds == 0
-
- msg = "'Timedelta' object has no attribute '{}'"
- with pytest.raises(AttributeError, match=msg.format("hours")):
- rng.hours
- with pytest.raises(AttributeError, match=msg.format("minutes")):
- rng.minutes
- with pytest.raises(AttributeError, match=msg.format("milliseconds")):
- rng.milliseconds
-
- # GH 10050
- check(rng.days)
- check(rng.seconds)
- check(rng.microseconds)
- check(rng.nanoseconds)
-
- td = Timedelta("-1 days, 10:11:12")
- assert abs(td) == Timedelta("13:48:48")
- assert str(td) == "-1 days +10:11:12"
- assert -td == Timedelta("0 days 13:48:48")
- assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
- assert Timedelta("-1 days, 10:11:12").value == -49728000000000
-
- rng = to_timedelta("-1 days, 10:11:12.100123456")
- assert rng.days == -1
- assert rng.seconds == 10 * 3600 + 11 * 60 + 12
- assert rng.microseconds == 100 * 1000 + 123
- assert rng.nanoseconds == 456
- msg = "'Timedelta' object has no attribute '{}'"
- with pytest.raises(AttributeError, match=msg.format("hours")):
- rng.hours
- with pytest.raises(AttributeError, match=msg.format("minutes")):
- rng.minutes
- with pytest.raises(AttributeError, match=msg.format("milliseconds")):
- rng.milliseconds
-
- # components
- tup = to_timedelta(-1, "us").components
- assert tup.days == -1
- assert tup.hours == 23
- assert tup.minutes == 59
- assert tup.seconds == 59
- assert tup.milliseconds == 999
- assert tup.microseconds == 999
- assert tup.nanoseconds == 0
-
- # GH 10050
- check(tup.days)
- check(tup.hours)
- check(tup.minutes)
- check(tup.seconds)
- check(tup.milliseconds)
- check(tup.microseconds)
- check(tup.nanoseconds)
-
- tup = Timedelta("-1 days 1 us").components
- assert tup.days == -2
- assert tup.hours == 23
- assert tup.minutes == 59
- assert tup.seconds == 59
- assert tup.milliseconds == 999
- assert tup.microseconds == 999
- assert tup.nanoseconds == 0
+ @pytest.mark.parametrize("td_units", TD_UNITS)
+ def test_resolution_is_class_attr(self, td_units: str):
+ """GH#21344; mirrors datetime.timedelta"""
+ td = Timedelta(1, td_units[0])
- def test_iso_conversion(self):
- # GH #21877
- expected = Timedelta(1, unit="s")
- assert to_timedelta("P0DT0H0M1S") == expected
+ assert td.resolution is Timedelta.resolution
+ assert Timedelta.resolution == Timedelta(1, "ns")
- def test_nat_converters(self):
- result = to_timedelta("nat").to_numpy()
- assert result.dtype.kind == "M"
- assert result.astype("int64") == iNaT
+ def test_asm8_is_alias_for_to_timedelta64(self):
+ result = Timedelta.max.asm8
- result = to_timedelta("nan").to_numpy()
- assert result.dtype.kind == "M"
- assert result.astype("int64") == iNaT
+ assert result == Timedelta.max.to_timedelta64()
+ assert isinstance(result, np.timedelta64)
@pytest.mark.parametrize(
- "unit, np_unit",
- [(value, "W") for value in ["W", "w"]]
- + [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
- + [
- (value, "m")
- for value in [
- "m",
- "minute",
- "min",
- "minutes",
- "t",
- "Minute",
- "Min",
- "Minutes",
- "T",
- ]
- ]
- + [
- (value, "s")
- for value in [
- "s",
- "seconds",
- "sec",
- "second",
- "S",
- "Seconds",
- "Sec",
- "Second",
- ]
- ]
- + [
- (value, "ms")
- for value in [
- "ms",
- "milliseconds",
- "millisecond",
- "milli",
- "millis",
- "l",
- "MS",
- "Milliseconds",
- "Millisecond",
- "Milli",
- "Millis",
- "L",
- ]
- ]
- + [
- (value, "us")
- for value in [
- "us",
- "microseconds",
- "microsecond",
- "micro",
- "micros",
- "u",
- "US",
- "Microseconds",
- "Microsecond",
- "Micro",
- "Micros",
- "U",
- ]
- ]
- + [
- (value, "ns")
- for value in [
- "ns",
- "nanoseconds",
- "nanosecond",
- "nano",
- "nanos",
- "n",
- "NS",
- "Nanoseconds",
- "Nanosecond",
- "Nano",
- "Nanos",
- "N",
- ]
- ],
+ "attr, expected_value",
+ (("delta", 1), ("freq", None), ("is_populated", False)),
)
- @pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
- def test_unit_parser(self, unit, np_unit, wrapper):
- # validate all units, GH 6855, GH 21762
- # array-likes
- expected = TimedeltaIndex(
- [np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
- )
- result = to_timedelta(wrapper(range(5)), unit=unit)
- tm.assert_index_equal(result, expected)
- result = TimedeltaIndex(wrapper(range(5)), unit=unit)
- tm.assert_index_equal(result, expected)
+ def test_deprecated_attrs(self, attr: str, expected_value):
+ """GH#46430, GH#46476"""
+ td = Timedelta(1, "ns")
+ msg = f"Timedelta.{attr}"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ getattr(td, attr) == expected_value
- str_repr = [f"{x}{unit}" for x in np.arange(5)]
- result = to_timedelta(wrapper(str_repr))
- tm.assert_index_equal(result, expected)
- result = to_timedelta(wrapper(str_repr))
- tm.assert_index_equal(result, expected)
-
- # scalar
- expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
- result = to_timedelta(2, unit=unit)
- assert result == expected
- result = Timedelta(2, unit=unit)
- assert result == expected
+ with pytest.raises(AttributeError, match="is not writable"):
+ setattr(td, attr, "coconut")
- result = to_timedelta(f"2{unit}")
- assert result == expected
- result = Timedelta(f"2{unit}")
- assert result == expected
- @pytest.mark.parametrize("unit", ["Y", "y", "M"])
- def test_unit_m_y_raises(self, unit):
- msg = "Units 'M', 'Y', and 'y' are no longer supported"
- with pytest.raises(ValueError, match=msg):
- Timedelta(10, unit)
+class TestMethods:
+ @pytest.mark.parametrize(
+ "value, expected",
+ (
+ (
+ "1 days, 10:11:12.123456789",
+ 1 * 86400 + 10 * 3600 + 11 * 60 + 12.123456,
+ ),
+ ("30S", 30.0),
+ ("0", 0.0),
+ ("-2S", -2.0),
+ ("5.324S", 5.324),
+ ),
+ )
+ def test_total_seconds(self, value: str, expected: float):
+ # see gh-10939
+ td = Timedelta(value)
+ assert td.total_seconds() == expected
- with pytest.raises(ValueError, match=msg):
- to_timedelta(10, unit)
+ def test_to_pytimedelta(self):
+ td = Timedelta("1 days, 10:11:12.012345")
+ py_td = td.to_pytimedelta()
- with pytest.raises(ValueError, match=msg):
- to_timedelta([1, 2], unit)
+ assert py_td == td
+ assert Timedelta(py_td) == td
+ assert isinstance(py_td, timedelta)
+ assert not isinstance(py_td, Timedelta)
- def test_numeric_conversions(self):
- assert Timedelta(0) == np.timedelta64(0, "ns")
- assert Timedelta(10) == np.timedelta64(10, "ns")
- assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
+ @pytest.mark.parametrize(
+ "td, expected",
+ (
+ (Timedelta(500, "ns"), timedelta(0)),
+ (Timedelta(501, "ns"), timedelta(microseconds=1)),
+ ),
+ )
+ def test_to_pytimedelta_rounds_ns(self, td: Timedelta, expected: timedelta):
+ assert td.to_pytimedelta() == expected
- assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
- assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
- assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
- assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
+ def test_to_timedelta64(self):
+ td64 = Timedelta.max.to_timedelta64()
- def test_timedelta_conversions(self):
- assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
- "m8[ns]"
- )
- assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
- "m8[ns]"
- )
- assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
+ assert td64 == Timedelta.max
+ assert Timedelta(td64) == Timedelta.max
+ assert isinstance(td64, np.timedelta64)
- def test_to_numpy_alias(self):
- # GH 24653: alias .to_numpy() for scalars
- td = Timedelta("10m7s")
- assert td.to_timedelta64() == td.to_numpy()
+ def test_to_numpy(self):
+ """GH#24653: alias .to_numpy() for scalars"""
+ assert Timedelta.max.to_timedelta64() == Timedelta.max.to_numpy()
+ @pytest.mark.parametrize(
+ "args, kwargs",
+ (
+ (("m8[ns]",), {}),
+ ((), {"copy": True}),
+ (("m8[ns]",), {"copy": True}),
+ ),
+ )
+ def test_to_numpy_raises_if_args_passed(self, args, kwargs):
# GH#44460
msg = "dtype and copy arguments are ignored"
with pytest.raises(ValueError, match=msg):
- td.to_numpy("m8[s]")
- with pytest.raises(ValueError, match=msg):
- td.to_numpy(copy=True)
+ Timedelta.max.to_numpy(*args, **kwargs)
@pytest.mark.parametrize(
"freq,s1,s2",
@@ -527,238 +909,162 @@ def test_round_sanity(self, val, method):
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
- def test_contains(self):
- # Checking for any NaT-like objects
- # GH 13603
- td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
- for v in [NaT, None, float("nan"), np.nan]:
- assert not (v in td)
-
- td = to_timedelta([NaT])
- for v in [NaT, None, float("nan"), np.nan]:
- assert v in td
-
- def test_identity(self):
-
- td = Timedelta(10, unit="d")
- assert isinstance(td, Timedelta)
- assert isinstance(td, timedelta)
-
- def test_short_format_converters(self):
- def conv(v):
- return v.astype("m8[ns]")
-
- assert Timedelta("10") == np.timedelta64(10, "ns")
- assert Timedelta("10ns") == np.timedelta64(10, "ns")
- assert Timedelta("100") == np.timedelta64(100, "ns")
- assert Timedelta("100ns") == np.timedelta64(100, "ns")
-
- assert Timedelta("1000") == np.timedelta64(1000, "ns")
- assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
- assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
-
- assert Timedelta("10us") == np.timedelta64(10000, "ns")
- assert Timedelta("100us") == np.timedelta64(100000, "ns")
- assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
- assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
- assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
-
- assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
- assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
- assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
- assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
-
- assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
- assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
- assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
- assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
- assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
-
- assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
- assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
- assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
- assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
- assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
- assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
- assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
-
- # space
- assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
- assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
-
- # invalid
- msg = "invalid unit abbreviation"
- with pytest.raises(ValueError, match=msg):
- Timedelta("1foo")
- msg = "unit abbreviation w/o a number"
- with pytest.raises(ValueError, match=msg):
- Timedelta("foo")
-
- def test_full_format_converters(self):
- def conv(v):
- return v.astype("m8[ns]")
-
- d1 = np.timedelta64(1, "D")
-
- assert Timedelta("1days") == conv(d1)
- assert Timedelta("1days,") == conv(d1)
- assert Timedelta("- 1days,") == -conv(d1)
-
- assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s"))
- assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s"))
- assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s"))
- assert Timedelta("06:00:01.01") == conv(
- np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
- )
-
- assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s"))
- assert Timedelta("1days, 06:00:01") == conv(
- d1 + np.timedelta64(6 * 3600 + 1, "s")
- )
- assert Timedelta("1days, 06:00:01.01") == conv(
- d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
- )
-
- # invalid
- msg = "have leftover units"
- with pytest.raises(ValueError, match=msg):
- Timedelta("- 1days, 00")
-
def test_pickle(self):
+ assert Timedelta.max == tm.round_trip_pickle(Timedelta.max)
- v = Timedelta("1 days 10:11:12.0123456")
- v_p = tm.round_trip_pickle(v)
- assert v == v_p
+ @pytest.mark.parametrize("num_days", range(20))
+ def test_hash_equals_timedelta_hash(self, num_days: int):
+ """GH#11129"""
+ kwargs = {"days": num_days, "seconds": 1}
+ td = Timedelta(**kwargs) # type: ignore[arg-type]
- def test_timedelta_hash_equality(self):
- # GH 11129
- v = Timedelta(1, "D")
- td = timedelta(days=1)
- assert hash(v) == hash(td)
+ assert hash(td) == hash(timedelta(**kwargs))
- d = {td: 2}
- assert d[v] == 2
+ @pytest.mark.parametrize("ns", (1, 500))
+ def test_hash_differs_from_timedelta_hash_if_ns_lost(self, ns: int):
+ td = Timedelta(ns, "ns")
+ assert hash(td) != hash(td.to_pytimedelta())
- tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)]
- assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
+ @pytest.mark.parametrize("td_kwarg", TD_KWARGS)
+ def test_only_zero_value_falsy(self, td_kwarg):
+ """GH#21484"""
+ assert bool(Timedelta(**{td_kwarg: 0})) is False
+ assert bool(Timedelta(**{td_kwarg: 1})) is True
+ assert bool(Timedelta(**{td_kwarg: -1})) is True
- # python timedeltas drop ns resolution
- ns_td = Timedelta(1, "ns")
- assert hash(ns_td) != hash(ns_td.to_pytimedelta())
+ @pytest.mark.parametrize(
+ "td, expected_iso",
+ [
+ (
+ Timedelta(days=6, milliseconds=123, nanoseconds=45),
+ "P6DT0H0M0.123000045S",
+ ),
+ (Timedelta(days=4, hours=12, minutes=30, seconds=5), "P4DT12H30M5S"),
+ (Timedelta(nanoseconds=123), "P0DT0H0M0.000000123S"),
+ # trim nano
+ (Timedelta(microseconds=10), "P0DT0H0M0.00001S"),
+ # trim micro
+ (Timedelta(milliseconds=1), "P0DT0H0M0.001S"),
+ # don't strip every 0
+ (Timedelta(minutes=1), "P0DT0H1M0S"),
+ ],
+ )
+ def test_isoformat(self, td, expected_iso):
+ assert td.isoformat() == expected_iso
- def test_implementation_limits(self):
- min_td = Timedelta(Timedelta.min)
- max_td = Timedelta(Timedelta.max)
+ @pytest.mark.parametrize(
+ ("value, expected"),
+ (
+ ("1 W", "7 days 00:00:00"),
+ ("-1 W", "-7 days +00:00:00"),
+ ("1 D", "1 days 00:00:00"),
+ ("-1 D", "-1 days +00:00:00"),
+ ("1 H", "0 days 01:00:00"),
+ ("-1 H", "-1 days +23:00:00"),
+ ("1 m", "0 days 00:01:00"),
+ ("-1 m", "-1 days +23:59:00"),
+ ("1 m", "0 days 00:01:00"),
+ ("-1 m", "-1 days +23:59:00"),
+ ("1 s", "0 days 00:00:01"),
+ ("-1 s", "-1 days +23:59:59"),
+ ("1 ms", "0 days 00:00:00.001000"),
+ ("-1 ms", "-1 days +23:59:59.999000"),
+ ("1 us", "0 days 00:00:00.000001"),
+ ("-1 us", "-1 days +23:59:59.999999"),
+ ("1 ns", "0 days 00:00:00.000000001"),
+ ("-1 ns", "-1 days +23:59:59.999999999"),
+ ),
+ )
+ def test_str_and_repr(self, value: str, expected: str):
+ expected_repr = f"Timedelta('{expected}')"
+ td = Timedelta(value)
- # GH 12727
- # timedelta limits correspond to int64 boundaries
- assert min_td.value == iNaT + 1
- assert max_td.value == lib.i8max
+ assert str(td) == expected
+ assert repr(td) == expected_repr
+ assert Timedelta(expected) == td
- # Beyond lower limit, a NAT before the Overflow
- assert (min_td - Timedelta(1, "ns")) is NaT
- msg = "int too (large|big) to convert"
- with pytest.raises(OverflowError, match=msg):
- min_td - Timedelta(2, "ns")
+class TestToTimedelta:
+ """Move elsewhere"""
- with pytest.raises(OverflowError, match=msg):
- max_td + Timedelta(1, "ns")
+ def test_iso_conversion(self):
+ # GH #21877
+ expected = Timedelta(1, unit="s")
+ assert to_timedelta("P0DT0H0M1S") == expected
- # Same tests using the internal nanosecond values
- td = Timedelta(min_td.value - 1, "ns")
- assert td is NaT
+ def test_nat_converters(self):
+ result = to_timedelta("nat").to_numpy()
+ assert result.dtype.kind == "M"
+ assert result.astype("int64") == iNaT
- with pytest.raises(OverflowError, match=msg):
- Timedelta(min_td.value - 2, "ns")
+ result = to_timedelta("nan").to_numpy()
+ assert result.dtype.kind == "M"
+ assert result.astype("int64") == iNaT
- with pytest.raises(OverflowError, match=msg):
- Timedelta(max_td.value + 1, "ns")
+ def test_contains(self):
+ # Checking for any NaT-like objects
+ # GH 13603
+ td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
+ for v in [NaT, None, float("nan"), np.nan]:
+ assert not (v in td)
- def test_total_seconds_precision(self):
- # GH 19458
- assert Timedelta("30S").total_seconds() == 30.0
- assert Timedelta("0").total_seconds() == 0.0
- assert Timedelta("-2S").total_seconds() == -2.0
- assert Timedelta("5.324S").total_seconds() == 5.324
- assert (Timedelta("30S").total_seconds() - 30.0) < 1e-20
- assert (30.0 - Timedelta("30S").total_seconds()) < 1e-20
+ td = to_timedelta([NaT])
+ for v in [NaT, None, float("nan"), np.nan]:
+ assert v in td
- def test_resolution_string(self):
- assert Timedelta(days=1).resolution_string == "D"
- assert Timedelta(days=1, hours=6).resolution_string == "H"
- assert Timedelta(days=1, minutes=6).resolution_string == "T"
- assert Timedelta(days=1, seconds=6).resolution_string == "S"
- assert Timedelta(days=1, milliseconds=6).resolution_string == "L"
- assert Timedelta(days=1, microseconds=6).resolution_string == "U"
- assert Timedelta(days=1, nanoseconds=6).resolution_string == "N"
+ # invalid
+ msg = "have leftover units"
+ with pytest.raises(ValueError, match=msg):
+ Timedelta("- 1days, 00")
- def test_resolution_deprecated(self):
- # GH#21344
- td = Timedelta(days=4, hours=3)
- result = td.resolution
- assert result == Timedelta(nanoseconds=1)
+ @pytest.mark.parametrize("unit, np_unit", TD_UNITS_TD64_UNITS.items())
+ @pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
+ def test_unit_parser(self, unit, np_unit, wrapper):
+ # validate all units, GH 6855, GH 21762
+ # array-likes
+ expected = TimedeltaIndex(
+ [np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
+ )
+ result = to_timedelta(wrapper(range(5)), unit=unit)
+ tm.assert_index_equal(result, expected)
+ result = TimedeltaIndex(wrapper(range(5)), unit=unit)
+ tm.assert_index_equal(result, expected)
- # Check that the attribute is available on the class, mirroring
- # the stdlib timedelta behavior
- result = Timedelta.resolution
- assert result == Timedelta(nanoseconds=1)
+ str_repr = [f"{x}{unit}" for x in np.arange(5)]
+ result = to_timedelta(wrapper(str_repr))
+ tm.assert_index_equal(result, expected)
+ result = to_timedelta(wrapper(str_repr))
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
- "value, expected",
+ "constructed_td, conversion",
[
- (Timedelta("10S"), True),
- (Timedelta("-10S"), True),
- (Timedelta(10, unit="ns"), True),
- (Timedelta(0, unit="ns"), False),
- (Timedelta(-10, unit="ns"), True),
- (Timedelta(None), True),
- (NaT, True),
+ (Timedelta(nanoseconds=100), "100ns"),
+ (
+ Timedelta(
+ days=1,
+ hours=1,
+ minutes=1,
+ weeks=1,
+ seconds=1,
+ milliseconds=1,
+ microseconds=1,
+ nanoseconds=1,
+ ),
+ 694861001001001,
+ ),
+ (Timedelta(microseconds=1) + Timedelta(nanoseconds=1), "1us1ns"),
+ (Timedelta(microseconds=1) - Timedelta(nanoseconds=1), "999ns"),
+ (Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), "990ns"),
],
)
-def test_truthiness(value, expected):
- # https://github.com/pandas-dev/pandas/issues/21484
- assert bool(value) is expected
-
-
-def test_timedelta_attribute_precision():
- # GH 31354
- td = Timedelta(1552211999999999872, unit="ns")
- result = td.days * 86400
- result += td.seconds
- result *= 1000000
- result += td.microseconds
- result *= 1000
- result += td.nanoseconds
- expected = td.value
- assert result == expected
-
-
-def test_freq_deprecated():
- # GH#46430
- td = Timedelta(123456546, unit="ns")
- with tm.assert_produces_warning(FutureWarning, match="Timedelta.freq"):
- freq = td.freq
-
- assert freq is None
-
- with pytest.raises(AttributeError, match="is not writable"):
- td.freq = offsets.Day()
-
-
-def test_is_populated_deprecated():
- # GH#46430
- td = Timedelta(123456546, unit="ns")
- with tm.assert_produces_warning(FutureWarning, match="Timedelta.is_populated"):
- td.is_populated
-
- with pytest.raises(AttributeError, match="is not writable"):
- td.is_populated = 1
+def test_td_constructor_on_nanoseconds(constructed_td, conversion):
+ # GH#9273
+ assert constructed_td == Timedelta(conversion)
-def test_delta_deprecated():
- # GH#46476
- td = Timedelta(123456546, unit="ns")
- with tm.assert_produces_warning(FutureWarning, match="Timedelta.delta is"):
- td.delta
+def test_nan_total_seconds():
+ # put elsewhere? a test of NaT, not Timedelta, behavior
+ rng = Timedelta(np.nan)
+ assert np.isnan(rng.total_seconds())
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index b46962fb82896..e4aa709d054b4 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -3,12 +3,13 @@
timedelta,
timezone,
)
+import re
import numpy as np
import pytest
from pandas._libs.tslibs import (
- OutOfBoundsDatetime,
+ OutOfBoundsTimedelta,
Timedelta,
Timestamp,
offsets,
@@ -39,10 +40,8 @@ def test_overflow_offset_raises(self):
stamp = Timestamp("2017-01-13 00:00:00")
offset_overflow = 20169940 * offsets.Day(1)
- msg = (
- "the add operation between "
- r"\<-?\d+ \* Days\> and \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} "
- "will overflow"
+ msg = re.escape(
+ "outside allowed range [-9223372036854775807ns, 9223372036854775807ns]"
)
lmsg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
@@ -51,7 +50,7 @@ def test_overflow_offset_raises(self):
with pytest.raises(OverflowError, match=lmsg):
stamp + offset_overflow
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
offset_overflow + stamp
with pytest.raises(OverflowError, match=lmsg):
@@ -66,23 +65,27 @@ def test_overflow_offset_raises(self):
with pytest.raises(OverflowError, match=lmsg):
stamp + offset_overflow
- with pytest.raises(OverflowError, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
offset_overflow + stamp
with pytest.raises(OverflowError, match=lmsg):
stamp - offset_overflow
- def test_overflow_timestamp_raises(self):
+ def test_sub_returns_stdlib_timedelta_to_avoid_overflow(self):
# https://github.com/pandas-dev/pandas/issues/31774
- msg = "Result is too large"
+ msg = "Result is too large for pandas.Timedelta"
a = Timestamp("2101-01-01 00:00:00")
b = Timestamp("1688-01-01 00:00:00")
- with pytest.raises(OutOfBoundsDatetime, match=msg):
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
a - b
- # but we're OK for timestamp and datetime.datetime
- assert (a - b.to_pydatetime()) == (a.to_pydatetime() - b)
+ # but we're OK for Timestamp and datetime.datetime
+ r0 = a - b.to_pydatetime()
+ r1 = a.to_pydatetime() - b
+ assert r0 == r1
+ assert isinstance(r0, timedelta)
+ assert isinstance(r1, timedelta)
def test_delta_preserve_nanos(self):
val = Timestamp(1337299200000000123)
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 7597d4345cfce..c04cd8071e477 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1838,14 +1838,8 @@ def test_to_datetime_list_of_integers(self):
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
+ msg = "outside allowed range"
- msg = "|".join(
- [
- "Python int too large to convert to C long",
- "long too big to convert",
- "int too big to convert",
- ]
- )
with pytest.raises(OutOfBoundsTimedelta, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000)
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 6c11ec42858c0..0825fd1fb7778 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -109,9 +109,7 @@ def test_to_timedelta_invalid_unit(self, arg):
def test_to_timedelta_time(self):
# time not supported ATM
- msg = (
- "Value must be Timedelta, string, integer, float, timedelta or convertible"
- )
+ msg = "Invalid type for timedelta scalar: <class 'datetime.time'>"
with pytest.raises(ValueError, match=msg):
to_timedelta(time(second=1))
assert to_timedelta(time(second=1), errors="coerce") is pd.NaT
@@ -264,10 +262,7 @@ def test_to_timedelta_zerodim(self, fixed_now_ts):
dt64 = fixed_now_ts.to_datetime64()
arg = np.array(dt64)
- msg = (
- "Value must be Timedelta, string, integer, float, timedelta "
- "or convertible, not datetime64"
- )
+ msg = "Invalid type for timedelta scalar: <class 'numpy.datetime64'>"
with pytest.raises(ValueError, match=msg):
to_timedelta(arg)
diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py
index d9e86d53f2587..df5b4cb5f8df7 100644
--- a/pandas/tests/tslibs/test_timedeltas.py
+++ b/pandas/tests/tslibs/test_timedeltas.py
@@ -1,4 +1,8 @@
-import re
+"""
+Tests against the for-internal-use-only functions in pandas._libs.tslibs.timedeltas.
+
+For tests against the public Timedelta API, see pandas/tests/scalar/timedelta/
+"""
import numpy as np
import pytest
@@ -55,42 +59,27 @@ def test_delta_to_nanoseconds_error():
delta_to_nanoseconds(np.int32(3))
-def test_huge_nanoseconds_overflow():
+def test_delta_to_nanoseconds_overflow():
# GH 32402
assert delta_to_nanoseconds(Timedelta(1e10)) == 1e10
assert delta_to_nanoseconds(Timedelta(nanoseconds=1e10)) == 1e10
-@pytest.mark.parametrize(
- "kwargs", [{"Seconds": 1}, {"seconds": 1, "Nanoseconds": 1}, {"Foo": 2}]
-)
-def test_kwarg_assertion(kwargs):
- err_message = (
- "cannot construct a Timedelta from the passed arguments, "
- "allowed keywords are "
- "[weeks, days, hours, minutes, seconds, "
- "milliseconds, microseconds, nanoseconds]"
- )
-
- with pytest.raises(ValueError, match=re.escape(err_message)):
- Timedelta(**kwargs)
-
-
-class TestArrayToTimedelta64:
- def test_array_to_timedelta64_string_with_unit_2d_raises(self):
- # check the 'unit is not None and errors != "coerce"' path
- # in array_to_timedelta64 raises correctly with 2D values
- values = np.array([["1", 2], [3, "4"]], dtype=object)
- with pytest.raises(ValueError, match="unit must not be specified"):
- array_to_timedelta64(values, unit="s")
-
- def test_array_to_timedelta64_non_object_raises(self):
- # check we raise, not segfault
- values = np.arange(5)
-
- msg = "'values' must have object dtype"
- with pytest.raises(TypeError, match=msg):
- array_to_timedelta64(values)
+def test_array_to_timedelta64_string_with_unit_2d_raises():
+ # check the 'unit is not None and errors != "coerce"' path
+ # in array_to_timedelta64 raises correctly with 2D values
+ values = np.array([["1", 2], [3, "4"]], dtype=object)
+ with pytest.raises(ValueError, match="unit must not be specified"):
+ array_to_timedelta64(values, unit="s")
+
+
+def test_array_to_timedelta64_non_object_raises():
+ # check we raise, not segfault
+ values = np.arange(5)
+
+ msg = "'values' must have object dtype"
+ with pytest.raises(TypeError, match=msg):
+ array_to_timedelta64(values)
@pytest.mark.parametrize("unit", ["s", "ms", "us"])
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This is a companion of sorts to https://github.com/pandas-dev/pandas/pull/46854. It adds more thorough tests, and groups them by type, but doesn't change any behavior.
/cc @jbrockmendel @jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/46936 | 2022-05-04T01:02:22Z | 2022-05-25T01:05:05Z | null | 2022-05-25T01:05:05Z |
PERF: Remove docstrings from inline cython code | diff --git a/setup.py b/setup.py
index 67b91c55dd397..11e3494b72561 100755
--- a/setup.py
+++ b/setup.py
@@ -46,8 +46,11 @@ def is_platform_mac():
__version__ as _CYTHON_VERSION,
)
from Cython.Build import cythonize
+ from Cython.Compiler import Options
_CYTHON_INSTALLED = parse_version(_CYTHON_VERSION) >= parse_version(min_cython_ver)
+ Options.docstrings = False
+
except ImportError:
_CYTHON_VERSION = None
_CYTHON_INSTALLED = False
These functions aren't externally available, and removing their docstrings saves
a small amount of space (approximately 250k in the size of the generated
.so's on a linux64 machine).
| https://api.github.com/repos/pandas-dev/pandas/pulls/46934 | 2022-05-03T20:00:27Z | 2022-06-16T05:22:31Z | null | 2022-06-16T05:22:31Z |
ENH: support mask in libalgos.rank | diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index 60bdb504c545b..0cc9209fbdfc5 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -109,6 +109,7 @@ def rank_1d(
ascending: bool = ...,
pct: bool = ...,
na_option=...,
+ mask: npt.NDArray[np.bool_] | None = ...,
) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1]
def rank_2d(
in_arr: np.ndarray, # ndarray[numeric_object_t, ndim=2]
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 6c28b4f821080..d33eba06988e9 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -889,6 +889,7 @@ def rank_1d(
bint ascending=True,
bint pct=False,
na_option="keep",
+ const uint8_t[:] mask=None,
):
"""
Fast NaN-friendly version of ``scipy.stats.rankdata``.
@@ -918,6 +919,8 @@ def rank_1d(
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
+ mask : np.ndarray[bool], optional, default None
+ Specify locations to be treated as NA, for e.g. Categorical.
"""
cdef:
TiebreakEnumType tiebreak
@@ -927,7 +930,6 @@ def rank_1d(
float64_t[::1] out
ndarray[numeric_object_t, ndim=1] masked_vals
numeric_object_t[:] masked_vals_memview
- uint8_t[:] mask
bint keep_na, nans_rank_highest, check_labels, check_mask
numeric_object_t nan_fill_val
@@ -956,6 +958,7 @@ def rank_1d(
or numeric_object_t is object
or (numeric_object_t is int64_t and is_datetimelike)
)
+ check_mask = check_mask or mask is not None
# Copy values into new array in order to fill missing data
# with mask, without obfuscating location of missing data
@@ -965,7 +968,9 @@ def rank_1d(
else:
masked_vals = values.copy()
- if numeric_object_t is object:
+ if mask is not None:
+ pass
+ elif numeric_object_t is object:
mask = missing.isnaobj(masked_vals)
elif numeric_object_t is int64_t and is_datetimelike:
mask = (masked_vals == NPY_NAT).astype(np.uint8)
diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi
index 197a8bdc0cd7c..2f0c3980c0c02 100644
--- a/pandas/_libs/groupby.pyi
+++ b/pandas/_libs/groupby.pyi
@@ -128,6 +128,7 @@ def group_rank(
ascending: bool = ...,
pct: bool = ...,
na_option: Literal["keep", "top", "bottom"] = ...,
+ mask: npt.NDArray[np.bool_] | None = ...,
) -> None: ...
def group_max(
out: np.ndarray, # groupby_t[:, ::1]
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 9bc89eef089cd..03f318d08d8cb 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -1262,6 +1262,7 @@ def group_rank(
bint ascending=True,
bint pct=False,
str na_option="keep",
+ const uint8_t[:, :] mask=None,
) -> None:
"""
Provides the rank of values within each group.
@@ -1294,6 +1295,7 @@ def group_rank(
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
+ mask : np.ndarray[bool] or None, default None
Notes
-----
@@ -1302,10 +1304,16 @@ def group_rank(
cdef:
Py_ssize_t i, k, N
ndarray[float64_t, ndim=1] result
+ const uint8_t[:] sub_mask
N = values.shape[1]
for k in range(N):
+ if mask is None:
+ sub_mask = None
+ else:
+ sub_mask = mask[:, k]
+
result = rank_1d(
values=values[:, k],
labels=labels,
@@ -1313,7 +1321,8 @@ def group_rank(
ties_method=ties_method,
ascending=ascending,
pct=pct,
- na_option=na_option
+ na_option=na_option,
+ mask=sub_mask,
)
for i in range(len(result)):
# TODO: why can't we do out[:, k] = result?
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 09954bd6be4e4..a769c92e0b542 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -46,7 +46,6 @@
ensure_platform_int,
is_1d_only_ea_dtype,
is_bool_dtype,
- is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_float_dtype,
@@ -56,12 +55,14 @@
is_timedelta64_dtype,
needs_i8_conversion,
)
+from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import (
+ Categorical,
DatetimeArray,
ExtensionArray,
PeriodArray,
@@ -142,7 +143,15 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
# "group_any" and "group_all" are also support masks, but don't go
# through WrappedCythonOp
- _MASKED_CYTHON_FUNCTIONS = {"cummin", "cummax", "min", "max", "last", "first"}
+ _MASKED_CYTHON_FUNCTIONS = {
+ "cummin",
+ "cummax",
+ "min",
+ "max",
+ "last",
+ "first",
+ "rank",
+ }
_cython_arity = {"ohlc": 4} # OHLC
@@ -229,12 +238,17 @@ def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
# never an invalid op for those dtypes, so return early as fastpath
return
- if is_categorical_dtype(dtype):
+ if isinstance(dtype, CategoricalDtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
- raise NotImplementedError(f"{dtype} dtype not supported")
+ elif how not in ["rank"]:
+ # only "rank" is implemented in cython
+ raise NotImplementedError(f"{dtype} dtype not supported")
+ elif not dtype.ordered:
+ # TODO: TypeError?
+ raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
@@ -332,6 +346,25 @@ def _ea_wrap_cython_operation(
**kwargs,
)
+ elif isinstance(values, Categorical) and self.uses_mask():
+ assert self.how == "rank" # the only one implemented ATM
+ assert values.ordered # checked earlier
+ mask = values.isna()
+ npvalues = values._ndarray
+
+ res_values = self._cython_op_ndim_compat(
+ npvalues,
+ min_count=min_count,
+ ngroups=ngroups,
+ comp_ids=comp_ids,
+ mask=mask,
+ **kwargs,
+ )
+
+ # If we ever have more than just "rank" here, we'll need to do
+ # `if self.how in self.cast_blocklist` like we do for other dtypes.
+ return res_values
+
npvalues = self._ea_to_cython_values(values)
res_values = self._cython_op_ndim_compat(
@@ -551,6 +584,9 @@ def _call_cython_op(
else:
# TODO: min_count
if self.uses_mask():
+ if self.how != "rank":
+ # TODO: should rank take result_mask?
+ kwargs["result_mask"] = result_mask
func(
out=result,
values=values,
@@ -558,7 +594,6 @@ def _call_cython_op(
ngroups=ngroups,
is_datetimelike=is_datetimelike,
mask=mask,
- result_mask=result_mask,
**kwargs,
)
else:
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
index 7830c229ece2f..8bbe38d3379ac 100644
--- a/pandas/tests/groupby/test_rank.py
+++ b/pandas/tests/groupby/test_rank.py
@@ -458,6 +458,8 @@ def test_rank_avg_even_vals(dtype, upper):
result = df.groupby("key").rank()
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
+ if upper:
+ exp_df = exp_df.astype("Float64")
tm.assert_frame_equal(result, exp_df)
@@ -663,3 +665,17 @@ def test_non_unique_index():
name="value",
)
tm.assert_series_equal(result, expected)
+
+
+def test_rank_categorical():
+ cat = pd.Categorical(["a", "a", "b", np.nan, "c", "b"], ordered=True)
+ cat2 = pd.Categorical([1, 2, 3, np.nan, 4, 5], ordered=True)
+
+ df = DataFrame({"col1": [0, 1, 0, 1, 0, 1], "col2": cat, "col3": cat2})
+
+ gb = df.groupby("col1")
+
+ res = gb.rank()
+
+ expected = df.astype(object).groupby("col1").rank()
+ tm.assert_frame_equal(res, expected)
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46932 | 2022-05-02T22:35:01Z | 2022-05-04T13:07:42Z | 2022-05-04T13:07:42Z | 2022-05-04T20:14:26Z |
DOC: added index, dropna description for HDF methods #45030 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c615216240d60..13af64c9fea5d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2599,6 +2599,8 @@ def to_hdf(
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed".
+ index : bool, default True
+ Write DataFrame index as a column.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
@@ -2609,6 +2611,8 @@ def to_hdf(
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
+ dropna : bool, default False, optional
+ Remove missing values.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 505f5a74f06e6..fc9671c2fc973 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1112,6 +1112,8 @@ def put(
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
+ index : bool, default True
+ Write DataFrame index as a column.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list of columns or True, default None
@@ -1124,6 +1126,8 @@ def put(
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables to have the same h5 files (same hashes)
independent on creation time.
+ dropna : bool, default False, optional
+ Remove missing values.
.. versionadded:: 1.1.0
"""
@@ -1239,6 +1243,8 @@ def append(
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
+ index : bool, default True
+ Write DataFrame index as a column.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
@@ -1251,7 +1257,7 @@ def append(
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
- dropna : bool, default False
+ dropna : bool, default False, optional
Do not write an ALL nan row to the store settable
by the option 'io.hdf.dropna_table'.
| - [ ] closes #45030
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46931 | 2022-05-02T22:20:01Z | 2022-05-04T23:38:22Z | 2022-05-04T23:38:22Z | 2022-05-05T08:15:34Z |
DataFrame.replace with dict doesn't work when value=None | diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index bece833066f89..890b988378870 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -482,8 +482,8 @@
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and
- 'y' with 'z'. To use a dict in this way the `value`
- parameter should be `None`.
+ 'y' with 'z'. To use a dict in this way, the optional `value`
+ parameter should not be given.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'
@@ -494,8 +494,8 @@
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column
- 'a' for the value 'b' and replace it with NaN. The `value`
- parameter should be ``None`` to use a nested dict in this
+ 'a' for the value 'b' and replace it with NaN. The optional `value`
+ parameter should not be specified to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
- [x] closes #46606 | https://api.github.com/repos/pandas-dev/pandas/pulls/46930 | 2022-05-02T22:16:55Z | 2022-05-03T01:36:54Z | 2022-05-03T01:36:53Z | 2022-05-03T01:37:00Z |
TYP: overload maybe_downcast_numeric and maybe_downcast_to_dtype | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index cba055d5b4345..88a92ea1455d0 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -246,6 +246,16 @@ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
raise TypeError(f"Cannot cast {repr(value)} to {dtype}")
+@overload
+def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
+ ...
+
+
+@overload
+def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
+ ...
+
+
def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
"""
try to cast to the specified dtype (e.g. convert back to bool/int
@@ -301,6 +311,20 @@ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLi
return result
+@overload
+def maybe_downcast_numeric(
+ result: np.ndarray, dtype: np.dtype, do_round: bool = False
+) -> np.ndarray:
+ ...
+
+
+@overload
+def maybe_downcast_numeric(
+ result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
+) -> ArrayLike:
+ ...
+
+
def maybe_downcast_numeric(
result: ArrayLike, dtype: DtypeObj, do_round: bool = False
) -> ArrayLike:
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index a769c92e0b542..599f384a89a68 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -637,9 +637,7 @@ def _call_cython_op(
else:
op_result = result
- # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]",
- # expected "ndarray")
- return op_result # type: ignore[return-value]
+ return op_result
@final
def cython_operation(
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index f6a20a418c32b..c3acfc5ff2f66 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1141,11 +1141,7 @@ def interval_range(
if all(is_integer(x) for x in com.not_none(start, end, freq)):
# np.linspace always produces float output
- # error: Incompatible types in assignment (expression has type
- # "Union[ExtensionArray, ndarray]", variable has type "ndarray")
- breaks = maybe_downcast_numeric( # type: ignore[assignment]
- breaks, np.dtype("int64")
- )
+ breaks = maybe_downcast_numeric(breaks, np.dtype("int64"))
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
| xref https://github.com/pandas-dev/pandas/issues/37715
Overload type definitions for these methods so that passing in an ndarray returns an ndarray.
This allows removing two more ignored mypy errors. | https://api.github.com/repos/pandas-dev/pandas/pulls/46929 | 2022-05-02T21:28:09Z | 2022-05-06T14:23:01Z | 2022-05-06T14:23:01Z | 2022-05-06T14:23:27Z |
TYP: resolve ignored mypy errors in core/describe.py | diff --git a/pandas/core/describe.py b/pandas/core/describe.py
index 60881d7a68b10..c70dbe0b8b0b1 100644
--- a/pandas/core/describe.py
+++ b/pandas/core/describe.py
@@ -22,7 +22,10 @@
import numpy as np
from pandas._libs.tslibs import Timestamp
-from pandas._typing import NDFrameT
+from pandas._typing import (
+ NDFrameT,
+ npt,
+)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_percentile
@@ -186,11 +189,9 @@ def _select_data(self):
"""Select columns to be described."""
if (self.include is None) and (self.exclude is None):
# when some numerics are found, keep only numerics
- default_include = [np.number]
+ default_include: list[npt.DTypeLike] = [np.number]
if self.datetime_is_numeric:
- # error: Argument 1 to "append" of "list" has incompatible type "str";
- # expected "Type[number[Any]]"
- default_include.append("datetime") # type: ignore[arg-type]
+ default_include.append("datetime")
data = self.obj.select_dtypes(include=default_include)
if len(data.columns) == 0:
data = self.obj
@@ -230,10 +231,7 @@ def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series:
"""
from pandas import Series
- # error: Argument 1 to "format_percentiles" has incompatible type "Sequence[float]";
- # expected "Union[ndarray, List[Union[int, float]], List[float], List[Union[str,
- # float]]]"
- formatted_percentiles = format_percentiles(percentiles) # type: ignore[arg-type]
+ formatted_percentiles = format_percentiles(percentiles)
stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
d = (
@@ -337,10 +335,7 @@ def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:
# GH-30164
from pandas import Series
- # error: Argument 1 to "format_percentiles" has incompatible type "Sequence[float]";
- # expected "Union[ndarray, List[Union[int, float]], List[float], List[Union[str,
- # float]]]"
- formatted_percentiles = format_percentiles(percentiles) # type: ignore[arg-type]
+ formatted_percentiles = format_percentiles(percentiles)
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index fd79d418658ea..ef25224e5a847 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1667,7 +1667,7 @@ def _format_strings(self) -> list[str]:
def format_percentiles(
- percentiles: (np.ndarray | list[int | float] | list[float] | list[str | float]),
+ percentiles: (np.ndarray | Sequence[float]),
) -> list[str]:
"""
Outputs rounded and formatted percentiles.
| xref https://github.com/pandas-dev/pandas/issues/37715
| https://api.github.com/repos/pandas-dev/pandas/pulls/46928 | 2022-05-02T20:26:52Z | 2022-05-06T21:24:31Z | 2022-05-06T21:24:31Z | 2022-05-06T21:24:36Z |
CLN: tzconversion | diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd
index a34161b20e2ff..600ac54639dfc 100644
--- a/pandas/_libs/tslibs/tzconversion.pxd
+++ b/pandas/_libs/tslibs/tzconversion.pxd
@@ -6,9 +6,6 @@ from numpy cimport (
)
-cdef int64_t localize_tzinfo_api(
- int64_t utc_val, tzinfo tz, bint* fold=*
-) except? -1
cdef int64_t tz_convert_from_utc_single(
int64_t utc_val, tzinfo tz, bint* fold=?, Py_ssize_t* outpos=?
) except? -1
@@ -16,15 +13,6 @@ cdef int64_t tz_localize_to_utc_single(
int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=*
) except? -1
-cdef Py_ssize_t bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n)
-
-cdef bint infer_dateutil_fold(
- int64_t value,
- const int64_t[::1] trans,
- const int64_t[::1] deltas,
- Py_ssize_t pos,
-)
-
cdef class Localizer:
cdef:
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 8d307e324ba4e..fede9768f5fee 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -50,7 +50,6 @@ cdef const int64_t[::1] _deltas_placeholder = np.array([], dtype=np.int64)
@cython.freelist(16)
-#@cython.internal
@cython.final
cdef class Localizer:
# cdef:
@@ -102,13 +101,15 @@ cdef class Localizer:
if self.use_utc:
return utc_val
elif self.use_tzlocal:
- return utc_val + localize_tzinfo_api(utc_val, self.tz, fold)
+ return utc_val + _tz_localize_using_tzinfo_api(
+ utc_val, self.tz, to_utc=False, fold=fold
+ )
elif self.use_fixed:
return utc_val + self.delta
else:
pos[0] = bisect_right_i8(self.tdata, utc_val, self.ntrans) - 1
if fold is not NULL:
- fold[0] = infer_dateutil_fold(
+ fold[0] = _infer_dateutil_fold(
utc_val, self.trans, self.deltas, pos[0]
)
@@ -184,10 +185,10 @@ timedelta-like}
cdef:
const int64_t[::1] deltas
ndarray[uint8_t, cast=True] ambiguous_array
- Py_ssize_t i, isl, isr, idx, pos, ntrans, n = vals.shape[0]
+ Py_ssize_t i, idx, pos, ntrans, n = vals.shape[0]
Py_ssize_t delta_idx_offset, delta_idx, pos_left, pos_right
int64_t *tdata
- int64_t v, left, right, val, v_left, v_right, new_local, remaining_mins
+ int64_t v, left, right, val, new_local, remaining_mins
int64_t first_delta, delta
int64_t shift_delta = 0
ndarray[int64_t] trans, result_a, result_b, dst_hours
@@ -202,7 +203,7 @@ timedelta-like}
if is_utc(tz) or tz is None:
return vals.copy()
- result = np.empty(n, dtype=np.int64)
+ result = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
if is_tzlocal(tz) or is_zoneinfo(tz):
for i in range(n):
@@ -265,40 +266,7 @@ timedelta-like}
# Determine whether each date lies left of the DST transition (store in
# result_a) or right of the DST transition (store in result_b)
- result_a = np.empty(n, dtype=np.int64)
- result_b = np.empty(n, dtype=np.int64)
-
- for i in range(n):
- # This loops resembles the "Find the two best possibilities" block
- # in pytz's DstTZInfo.localize method.
- result_a[i] = NPY_NAT
- result_b[i] = NPY_NAT
-
- val = vals[i]
- if val == NPY_NAT:
- continue
-
- # TODO: be careful of overflow in val-DAY_NANOS
- isl = bisect_right_i8(tdata, val - DAY_NANOS, ntrans) - 1
- if isl < 0:
- isl = 0
-
- v_left = val - deltas[isl]
- pos_left = bisect_right_i8(tdata, v_left, ntrans) - 1
- # timestamp falls to the left side of the DST transition
- if v_left + deltas[pos_left] == val:
- result_a[i] = v_left
-
- # TODO: be careful of overflow in val+DAY_NANOS
- isr = bisect_right_i8(tdata, val + DAY_NANOS, ntrans) - 1
- if isr < 0:
- isr = 0
-
- v_right = val - deltas[isr]
- pos_right = bisect_right_i8(tdata, v_right, ntrans) - 1
- # timestamp falls to the right side of the DST transition
- if v_right + deltas[pos_right] == val:
- result_b[i] = v_right
+ result_a, result_b =_get_utc_bounds(vals, tdata, ntrans, deltas)
# silence false-positive compiler warning
dst_hours = np.empty(0, dtype=np.int64)
@@ -417,6 +385,59 @@ cdef inline str _render_tstamp(int64_t val):
return str(Timestamp(val))
+cdef _get_utc_bounds(
+ ndarray vals,
+ int64_t* tdata,
+ Py_ssize_t ntrans,
+ const int64_t[::1] deltas,
+):
+ # Determine whether each date lies left of the DST transition (store in
+ # result_a) or right of the DST transition (store in result_b)
+
+ cdef:
+ ndarray result_a, result_b
+ Py_ssize_t i, n = vals.size
+ int64_t val, v_left, v_right
+ Py_ssize_t isl, isr, pos_left, pos_right
+
+ result_a = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
+ result_b = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
+
+ for i in range(n):
+ # This loops resembles the "Find the two best possibilities" block
+ # in pytz's DstTZInfo.localize method.
+ result_a[i] = NPY_NAT
+ result_b[i] = NPY_NAT
+
+ val = vals[i]
+ if val == NPY_NAT:
+ continue
+
+ # TODO: be careful of overflow in val-DAY_NANOS
+ isl = bisect_right_i8(tdata, val - DAY_NANOS, ntrans) - 1
+ if isl < 0:
+ isl = 0
+
+ v_left = val - deltas[isl]
+ pos_left = bisect_right_i8(tdata, v_left, ntrans) - 1
+ # timestamp falls to the left side of the DST transition
+ if v_left + deltas[pos_left] == val:
+ result_a[i] = v_left
+
+ # TODO: be careful of overflow in val+DAY_NANOS
+ isr = bisect_right_i8(tdata, val + DAY_NANOS, ntrans) - 1
+ if isr < 0:
+ isr = 0
+
+ v_right = val - deltas[isr]
+ pos_right = bisect_right_i8(tdata, v_right, ntrans) - 1
+ # timestamp falls to the right side of the DST transition
+ if v_right + deltas[pos_right] == val:
+ result_b[i] = v_right
+
+ return result_a, result_b
+
+
@cython.boundscheck(False)
cdef ndarray[int64_t] _get_dst_hours(
# vals only needed here to potential render an exception message
@@ -433,10 +454,10 @@ cdef ndarray[int64_t] _get_dst_hours(
intp_t switch_idx
int64_t left, right
- dst_hours = np.empty(n, dtype=np.int64)
+ dst_hours = cnp.PyArray_EMPTY(result_a.ndim, result_a.shape, cnp.NPY_INT64, 0)
dst_hours[:] = NPY_NAT
- mismatch = np.zeros(n, dtype=bool)
+ mismatch = cnp.PyArray_ZEROS(result_a.ndim, result_a.shape, cnp.NPY_BOOL, 0)
for i in range(n):
left = result_a[i]
@@ -450,6 +471,7 @@ cdef ndarray[int64_t] _get_dst_hours(
trans_idx = mismatch.nonzero()[0]
if trans_idx.size == 1:
+ # TODO: not reached in tests 2022-05-02; possible?
stamp = _render_tstamp(vals[trans_idx[0]])
raise pytz.AmbiguousTimeError(
f"Cannot infer dst time from {stamp} as there "
@@ -471,6 +493,7 @@ cdef ndarray[int64_t] _get_dst_hours(
delta = np.diff(result_a[grp])
if grp.size == 1 or np.all(delta > 0):
+ # TODO: not reached in tests 2022-05-02; possible?
stamp = _render_tstamp(vals[grp[0]])
raise pytz.AmbiguousTimeError(stamp)
@@ -478,6 +501,7 @@ cdef ndarray[int64_t] _get_dst_hours(
# for standard
switch_idxs = (delta <= 0).nonzero()[0]
if switch_idxs.size > 1:
+ # TODO: not reached in tests 2022-05-02; possible?
raise pytz.AmbiguousTimeError(
f"There are {switch_idxs.size} dst switches when "
"there should only be 1."
@@ -495,15 +519,6 @@ cdef ndarray[int64_t] _get_dst_hours(
# ----------------------------------------------------------------------
# Timezone Conversion
-cdef int64_t localize_tzinfo_api(
- int64_t utc_val, tzinfo tz, bint* fold=NULL
-) except? -1:
- """
- See _tz_localize_using_tzinfo_api.__doc__
- """
- return _tz_localize_using_tzinfo_api(utc_val, tz, to_utc=False, fold=fold)
-
-
def py_tz_convert_from_utc_single(int64_t utc_val, tzinfo tz):
# The 'bint* fold=NULL' in tz_convert_from_utc_single means we cannot
# make it cdef, so this is version exposed for testing from python.
@@ -608,7 +623,7 @@ cdef int64_t _tz_localize_using_tzinfo_api(
# NB: relies on dateutil internals, subject to change.
@cython.boundscheck(False)
@cython.wraparound(False)
-cdef bint infer_dateutil_fold(
+cdef bint _infer_dateutil_fold(
int64_t value,
const int64_t[::1] trans,
const int64_t[::1] deltas,
| Cleanups following recent refactoring. | https://api.github.com/repos/pandas-dev/pandas/pulls/46926 | 2022-05-02T16:00:08Z | 2022-05-02T20:10:43Z | 2022-05-02T20:10:42Z | 2022-05-02T20:48:29Z |
Backport PR #46912 on branch 1.4.x (CI: More targeted pyarrow version testing) | diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index 0a914dd965a5e..f5cbb0e88ff11 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -62,6 +62,15 @@ jobs:
pattern: "not slow and not network and not single_cpu"
pandas_testing_mode: "deprecate"
test_args: "-W error::DeprecationWarning:numpy"
+ exclude:
+ - env_file: actions-39.yaml
+ pyarrow_version: "6"
+ - env_file: actions-39.yaml
+ pyarrow_version: "7"
+ - env_file: actions-310.yaml
+ pyarrow_version: "6"
+ - env_file: actions-310.yaml
+ pyarrow_version: "7"
fail-fast: false
name: ${{ matrix.name || format('{0} pyarrow={1} {2}', matrix.env_file, matrix.pyarrow_version, matrix.pattern) }}
env:
| Backport PR #46912: CI: More targeted pyarrow version testing | https://api.github.com/repos/pandas-dev/pandas/pulls/46925 | 2022-05-02T15:01:10Z | 2022-05-02T20:31:43Z | 2022-05-02T20:31:43Z | 2022-05-02T20:31:44Z |
STYL: a few cleanups in pyi files | diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi
index 5c7be5e660fd9..a6d593076777d 100644
--- a/pandas/_libs/hashtable.pyi
+++ b/pandas/_libs/hashtable.pyi
@@ -144,26 +144,13 @@ class HashTable:
np.ndarray, # np.ndarray[subclass-specific]
npt.NDArray[np.intp],
] | np.ndarray: ... # np.ndarray[subclass-specific]
- def _unique(
- self,
- values: np.ndarray, # np.ndarray[subclass-specific]
- uniques, # FooVector
- count_prior: int = ...,
- na_sentinel: int = ...,
- na_value: object = ...,
- ignore_na: bool = ...,
- return_inverse: bool = ...,
- ) -> tuple[
- np.ndarray, # np.ndarray[subclass-specific]
- npt.NDArray[np.intp],
- ] | np.ndarray: ... # np.ndarray[subclass-specific]
def factorize(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
na_sentinel: int = ...,
na_value: object = ...,
mask=...,
- ) -> tuple[np.ndarray, npt.NDArray[np.intp],]: ... # np.ndarray[subclass-specific]
+ ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific]
class Complex128HashTable(HashTable): ...
class Complex64HashTable(HashTable): ...
@@ -175,7 +162,7 @@ class Int64HashTable(HashTable):
def get_labels_groupby(
self,
values: npt.NDArray[np.int64], # const int64_t[:]
- ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64],]: ...
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ...
def map_keys_to_values(
self,
keys: npt.NDArray[np.int64],
@@ -198,13 +185,13 @@ def duplicated(
keep: Literal["last", "first", False] = ...,
) -> npt.NDArray[np.bool_]: ...
def mode(
- values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ...
) -> np.ndarray: ...
def value_count(
values: np.ndarray,
dropna: bool,
- mask: npt.NDArray[np.bool_] | None = None,
-) -> tuple[np.ndarray, npt.NDArray[np.int64],]: ... # np.ndarray[same-as-values]
+ mask: npt.NDArray[np.bool_] | None = ...,
+) -> tuple[np.ndarray, npt.NDArray[np.int64]]: ... # np.ndarray[same-as-values]
# arr and values should have same dtype
def ismember(
diff --git a/pandas/_libs/tslibs/conversion.pyi b/pandas/_libs/tslibs/conversion.pyi
index 3d0288160e386..ca6f301673f33 100644
--- a/pandas/_libs/tslibs/conversion.pyi
+++ b/pandas/_libs/tslibs/conversion.pyi
@@ -14,7 +14,7 @@ class OutOfBoundsTimedelta(ValueError): ...
def precision_from_unit(
unit: str,
-) -> tuple[int, int,]: ... # (int64_t, _)
+) -> tuple[int, int]: ... # (int64_t, _)
def ensure_datetime64ns(
arr: np.ndarray, # np.ndarray[datetime64[ANY]]
copy: bool = ...,
@@ -25,5 +25,5 @@ def ensure_timedelta64ns(
) -> np.ndarray: ... # np.ndarray[timedelta64ns]
def datetime_to_datetime64(
values: npt.NDArray[np.object_],
-) -> tuple[np.ndarray, tzinfo | None,]: ... # (np.ndarray[dt64ns], _)
+) -> tuple[np.ndarray, tzinfo | None]: ... # (np.ndarray[dt64ns], _)
def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ...
diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi
index ab66677a8be3a..31ed25791389f 100644
--- a/pandas/_libs/tslibs/dtypes.pyi
+++ b/pandas/_libs/tslibs/dtypes.pyi
@@ -1,7 +1,5 @@
from enum import Enum
-from pandas._libs.tslibs.offsets import BaseOffset
-
# These are not public API, but are exposed in the .pyi file because they
# are imported in tests.
_attrname_to_abbrevs: dict[str, str]
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index c5ffaba37f51f..13daba5cfcbdf 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -16,7 +16,6 @@ import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
- NaTType,
Period,
Tick,
Timedelta,
diff --git a/pandas/conftest.py b/pandas/conftest.py
index ecdc0f10b1f56..9d98478010c97 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -17,7 +17,6 @@
- Dtypes
- Misc
"""
-# pyright: reportUntypedFunctionDecorator = false
from collections import abc
from datetime import (
diff --git a/pyproject.toml b/pyproject.toml
index c5f89076a29fa..7b32c5f8eab49 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -153,34 +153,21 @@ pythonVersion = "3.8"
typeCheckingMode = "basic"
include = ["pandas", "typings"]
exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
+# enable subset of "strict"
+reportDuplicateImport = true
+reportInvalidStubStatement = true
+reportPropertyTypeMismatch = true
+reportUntypedClassDecorator = true
+reportUntypedFunctionDecorator = true
+reportUntypedNamedTuple = true
+# disable subset of "basic"
reportGeneralTypeIssues = false
-reportConstantRedefinition = false
-reportFunctionMemberAccess = false
-reportImportCycles = false
-reportIncompatibleMethodOverride = false
-reportIncompatibleVariableOverride = false
reportMissingModuleSource = false
-reportMissingParameterType = false
-reportMissingTypeArgument = false
-reportMissingTypeStubs = false
reportOptionalCall = false
reportOptionalIterable = false
reportOptionalMemberAccess = false
reportOptionalOperand = false
reportOptionalSubscript = false
reportPrivateImportUsage = false
-reportPrivateUsage = false
reportUnboundVariable = false
-reportUnknownArgumentType = false
-reportUnknownLambdaType = false
-reportUnknownMemberType = false
-reportUnknownParameterType = false
-reportUnknownVariableType = false
-reportUnnecessaryComparison = false
-reportUnnecessaryIsInstance = false
reportUnsupportedDunderAll = false
-reportUntypedBaseClass = false
-reportUnusedClass = false
-reportUnusedFunction = false
-reportUnusedImport = false
-reportUnusedVariable = false
| Might make sense to run flake8 on pyi files (add flake8-pyi to pre-commit) | https://api.github.com/repos/pandas-dev/pandas/pulls/46921 | 2022-05-01T21:53:51Z | 2022-05-03T09:36:56Z | 2022-05-03T09:36:56Z | 2022-05-26T01:59:22Z |
TYP: fix a few annotations in offsets.pyi | diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi
index 4cc301018e8f8..9410379b16ba2 100644
--- a/pandas/_libs/tslibs/offsets.pyi
+++ b/pandas/_libs/tslibs/offsets.pyi
@@ -9,9 +9,7 @@ from typing import (
Any,
Collection,
Literal,
- Tuple,
TypeVar,
- Union,
overload,
)
@@ -101,7 +99,7 @@ def _get_offset(name: str) -> BaseOffset: ...
class SingleConstructorOffset(BaseOffset):
@classmethod
- def _from_name(cls, suffix=...): ...
+ def _from_name(cls, suffix: None = ...): ...
def __reduce__(self): ...
@overload
@@ -132,7 +130,7 @@ class RelativeDeltaOffset(BaseOffset):
class BusinessMixin(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., offset: timedelta = ...
- ): ...
+ ) -> None: ...
class BusinessDay(BusinessMixin): ...
@@ -144,14 +142,17 @@ class BusinessHour(BusinessMixin):
start: str | Collection[str] = ...,
end: str | Collection[str] = ...,
offset: timedelta = ...,
- ): ...
+ ) -> None: ...
-class WeekOfMonthMixin(SingleConstructorOffset): ...
+class WeekOfMonthMixin(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., weekday: int = ...
+ ) -> None: ...
class YearOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., month: int | None = ...
- ): ...
+ ) -> None: ...
class BYearEnd(YearOffset): ...
class BYearBegin(YearOffset): ...
@@ -186,7 +187,11 @@ class Week(SingleConstructorOffset):
self, n: int = ..., normalize: bool = ..., weekday: int | None = ...
) -> None: ...
-class WeekOfMonth(WeekOfMonthMixin): ...
+class WeekOfMonth(WeekOfMonthMixin):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ...
+ ) -> None: ...
+
class LastWeekOfMonth(WeekOfMonthMixin): ...
class FY5253Mixin(SingleConstructorOffset):
@@ -196,11 +201,22 @@ class FY5253Mixin(SingleConstructorOffset):
normalize: bool = ...,
weekday: int = ...,
startingMonth: int = ...,
- variation: str = ...,
+ variation: Literal["nearest", "last"] = ...,
) -> None: ...
class FY5253(FY5253Mixin): ...
-class FY5253Quarter(FY5253Mixin): ...
+
+class FY5253Quarter(FY5253Mixin):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ weekday: int = ...,
+ startingMonth: int = ...,
+ qtr_with_extra_week: int = ...,
+ variation: Literal["nearest", "last"] = ...,
+ ) -> None: ...
+
class Easter(SingleConstructorOffset): ...
class _CustomBusinessMonth(BusinessMixin):
@@ -208,29 +224,35 @@ class _CustomBusinessMonth(BusinessMixin):
self,
n: int = ...,
normalize: bool = ...,
+ weekmask: str = ...,
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
offset: timedelta = ...,
- holidays: None | list = ...,
- ): ...
+ ) -> None: ...
class CustomBusinessDay(BusinessDay):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
- offset: timedelta = ...,
weekmask: str = ...,
- ): ...
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
+ offset: timedelta = ...,
+ ) -> None: ...
class CustomBusinessHour(BusinessHour):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
+ weekmask: str = ...,
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
start: str = ...,
end: str = ...,
offset: timedelta = ...,
- holidays: None | list = ...,
- ): ...
+ ) -> None: ...
class CustomBusinessMonthEnd(_CustomBusinessMonth): ...
class CustomBusinessMonthBegin(_CustomBusinessMonth): ...
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 00ef4fcbf8986..6c96df9a7ea0b 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -3291,7 +3291,7 @@ cdef class CustomBusinessDay(BusinessDay):
holidays : list
List/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``.
- calendar : pd.HolidayCalendar or np.busdaycalendar
+ calendar : np.busdaycalendar
offset : timedelta, default timedelta(0)
"""
@@ -3417,7 +3417,7 @@ cdef class _CustomBusinessMonth(BusinessMixin):
holidays : list
List/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``.
- calendar : pd.HolidayCalendar or np.busdaycalendar
+ calendar : np.busdaycalendar
Calendar to integrate.
offset : timedelta, default timedelta(0)
Time offset to apply.
| - [ ] closes #46908
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Some doc-strings refer to `pd.HolidayCalendar` but that doesn't seem to exist?!
There are a few more un-annotated variables and a few containers (list and dict) without types but I'm not familair enough with them to annotate them. | https://api.github.com/repos/pandas-dev/pandas/pulls/46920 | 2022-05-01T18:18:51Z | 2022-05-03T11:58:49Z | 2022-05-03T11:58:48Z | 2022-05-26T01:59:21Z |
TYP: fix MultiIndex._names type | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 51ca9dbd763b4..6b7aca1b6c4ee 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10420,12 +10420,14 @@ def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False):
else:
mask = index_mask.reshape(-1, 1) & values_mask
- if isinstance(level, str):
- level = count_axis._get_level_number(level)
+ if isinstance(level, int):
+ level_number = level
+ else:
+ level_number = count_axis._get_level_number(level)
- level_name = count_axis._names[level]
- level_index = count_axis.levels[level]._rename(name=level_name)
- level_codes = ensure_platform_int(count_axis.codes[level])
+ level_name = count_axis._names[level_number]
+ level_index = count_axis.levels[level_number]._rename(name=level_name)
+ level_codes = ensure_platform_int(count_axis.codes[level_number])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 752ce28c58f55..29df930c5aaf3 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -287,7 +287,7 @@ class MultiIndex(Index):
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
- _names = FrozenList()
+ _names: list[Hashable | None] = []
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
@@ -326,9 +326,7 @@ def __new__(
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
- # Incompatible types in assignment (expression has type "List[None]",
- # variable has type "FrozenList") [assignment]
- result._names = [None] * len(levels) # type: ignore[assignment]
+ result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
@@ -1476,8 +1474,7 @@ def _set_names(self, names, *, level=None, validate: bool = True):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
- # error: Cannot determine type of '__setitem__'
- self._names[lev] = name # type: ignore[has-type]
+ self._names[lev] = name
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
| xref https://github.com/pandas-dev/pandas/issues/37715
_names was never really a FrozenList.
Also adapted some related code in _count_level
The change in _count_level is required because `list` cannot be indexed by `Hashable` - this issue had not been detected by mypy up until now because the argument of `__getitem__` is untyped for `FrozenList`. | https://api.github.com/repos/pandas-dev/pandas/pulls/46919 | 2022-05-01T16:40:03Z | 2022-05-21T20:04:21Z | 2022-05-21T20:04:20Z | 2022-05-21T20:04:25Z |
Fix: subset parameter of DataFrameGroupBy.value_counts has no effect | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 245e33fb1a23b..f9fc7d3fecb0e 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1735,12 +1735,20 @@ def value_counts(
name = self._selected_obj.name
keys = [] if name in in_axis_names else [self._selected_obj]
else:
- keys = [
- # Can't use .values because the column label needs to be preserved
- self._selected_obj.iloc[:, idx]
- for idx, name in enumerate(self._selected_obj.columns)
- if name not in in_axis_names
- ]
+ if subset:
+ keys = [
+ self._selected_obj.iloc[:, idx]
+ for idx, name in enumerate(self._selected_obj.columns)
+ if name in subset
+ ]
+ else:
+ keys = [
+ # Can't use .values because the column label needs to
+ # be preserved
+ self._selected_obj.iloc[:, idx]
+ for idx, name in enumerate(self._selected_obj.columns)
+ if name not in in_axis_names
+ ]
if subset is not None:
clashing = set(subset) & set(in_axis_names)
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index 577a72d3f5090..4bfe241725e9a 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -191,3 +191,13 @@ def test_series_groupby_value_counts_on_categorical():
# dtype: int64
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("columns", [["c1"], ["c2"], ["c1", "c2"]])
+def test_groupby_value_counts_subset(columns):
+ # GH46383
+ df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1])
+ result = df.groupby(level=0).value_counts(subset=columns)
+ expected = df.groupby(level=0)[columns].value_counts()
+
+ tm.assert_series_equal(result, expected)
| - [x] closes #46383
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46918 | 2022-05-01T16:06:30Z | 2022-05-01T16:08:10Z | null | 2022-05-01T16:08:10Z |
REF: stronger typing in _box_func | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 5c8c6d7fe23a3..5859f051ab343 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -538,13 +538,10 @@ def _check_compatible_with(self, other, setitem: bool = False):
# -----------------------------------------------------------------
# Descriptive Properties
- def _box_func(self, x) -> Timestamp | NaTType:
- if isinstance(x, np.datetime64):
- # GH#42228
- # Argument 1 to "signedinteger" has incompatible type "datetime64";
- # expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
- x = np.int64(x) # type: ignore[arg-type]
- ts = Timestamp(x, tz=self.tz)
+ def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
+ # GH#42228
+ value = x.view("i8")
+ ts = Timestamp(value, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 2c6e7119b478d..1f55842050df0 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -154,7 +154,7 @@ class TimedeltaArray(dtl.TimelikeOps):
# Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
# operates pointwise.
- def _box_func(self, x) -> Timedelta | NaTType:
+ def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType:
return Timedelta(x, unit="ns")
@property
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 1a897dba6ac80..713d80c26ef7a 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -16,7 +16,6 @@
from pandas._libs import (
NaT,
NaTType,
- Timedelta,
iNaT,
lib,
)
@@ -367,19 +366,23 @@ def _wrap_results(result, dtype: np.dtype, fill_value=None):
result = np.datetime64("NaT", "ns")
else:
result = np.int64(result).view("datetime64[ns]")
+ # retain original unit
+ result = result.astype(dtype, copy=False)
else:
# If we have float dtype, taking a view will give the wrong result
result = result.astype(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
- if result == fill_value:
- result = np.nan
+ if result == fill_value or np.isnan(result):
+ result = np.timedelta64("NaT").astype(dtype)
- # raise if we have a timedelta64[ns] which is too large
- if np.fabs(result) > lib.i8max:
+ elif np.fabs(result) > lib.i8max:
+ # raise if we have a timedelta64[ns] which is too large
raise ValueError("overflow in timedelta operation")
+ else:
+ # return a timedelta64 with the original unit
+ result = np.int64(result).astype(dtype, copy=False)
- result = Timedelta(result, unit="ns")
else:
result = result.astype("m8[ns]").view(dtype)
@@ -641,7 +644,7 @@ def _mask_datetimelike_result(
result[axis_mask] = iNaT # type: ignore[index]
else:
if mask.any():
- return NaT
+ return np.int64(iNaT).view(orig_values.dtype)
return result
diff --git a/pandas/tests/arrays/timedeltas/test_reductions.py b/pandas/tests/arrays/timedeltas/test_reductions.py
index 586a9187fc169..72d45f5b9a78c 100644
--- a/pandas/tests/arrays/timedeltas/test_reductions.py
+++ b/pandas/tests/arrays/timedeltas/test_reductions.py
@@ -147,7 +147,7 @@ def test_std(self, add):
if getattr(arr, "tz", None) is None:
result = nanops.nanstd(np.asarray(arr), skipna=True)
- assert isinstance(result, Timedelta)
+ assert isinstance(result, np.timedelta64)
assert result == expected
result = arr.std(skipna=False)
@@ -158,7 +158,8 @@ def test_std(self, add):
if getattr(arr, "tz", None) is None:
result = nanops.nanstd(np.asarray(arr), skipna=False)
- assert result is pd.NaT
+ assert isinstance(result, np.timedelta64)
+ assert np.isnat(result)
def test_median(self):
tdi = pd.TimedeltaIndex(["0H", "3H", "NaT", "5H06m", "0H", "2H"])
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 240b9dacce73a..005f7b088271f 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1020,7 +1020,8 @@ def test_nanmean_skipna_false(self, dtype):
arr[-1, -1] = "NaT"
result = nanops.nanmean(arr, skipna=False)
- assert result is pd.NaT
+ assert np.isnat(result)
+ assert result.dtype == dtype
result = nanops.nanmean(arr, axis=0, skipna=False)
expected = np.array([4, 5, "NaT"], dtype=arr.dtype)
| Necessary for non-nano support | https://api.github.com/repos/pandas-dev/pandas/pulls/46917 | 2022-05-01T15:22:50Z | 2022-05-02T14:58:31Z | 2022-05-02T14:58:31Z | 2022-05-02T15:47:23Z |
⬆️ UPGRADE: Autoupdate pre-commit config | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 767ef62bb1758..bd095c03e6fdb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -26,7 +26,7 @@ repos:
types_or: [python, rst, markdown]
files: ^(pandas|doc)/
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.1.0
+ rev: v4.2.0
hooks:
- id: debug-statements
- id: end-of-file-fixer
@@ -56,7 +56,7 @@ repos:
hooks:
- id: isort
- repo: https://github.com/asottile/pyupgrade
- rev: v2.31.1
+ rev: v2.32.0
hooks:
- id: pyupgrade
args: [--py38-plus]
@@ -71,7 +71,7 @@ repos:
types: [text] # overwrite types: [rst]
types_or: [python, rst]
- repo: https://github.com/sphinx-contrib/sphinx-lint
- rev: v0.2
+ rev: v0.4.1
hooks:
- id: sphinx-lint
- repo: https://github.com/asottile/yesqa
| <!-- START pr-commits -->
<!-- END pr-commits -->
## Base PullRequest
default branch (https://github.com/pandas-dev/pandas/tree/main)
## Command results
<details>
<summary>Details: </summary>
<details>
<summary><em>add path</em></summary>
```Shell
/home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/build/src/bin
```
</details>
<details>
<summary><em>pip install pre-commit</em></summary>
```Shell
Collecting pre-commit
Downloading pre_commit-2.18.1-py2.py3-none-any.whl (197 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 197.8/197.8 KB 23.1 MB/s eta 0:00:00
Collecting cfgv>=2.0.0
Downloading cfgv-3.3.1-py2.py3-none-any.whl (7.3 kB)
Collecting identify>=1.0.0
Downloading identify-2.5.0-py2.py3-none-any.whl (98 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 98.6/98.6 KB 21.6 MB/s eta 0:00:00
Collecting nodeenv>=0.11.1
Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB)
Collecting pyyaml>=5.1
Downloading PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (682 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 682.2/682.2 KB 43.2 MB/s eta 0:00:00
Collecting virtualenv>=20.0.8
Downloading virtualenv-20.14.1-py2.py3-none-any.whl (8.8 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 8.8/8.8 MB 98.1 MB/s eta 0:00:00
Collecting toml
Downloading toml-0.10.2-py2.py3-none-any.whl (16 kB)
Collecting filelock<4,>=3.2
Downloading filelock-3.6.0-py3-none-any.whl (10.0 kB)
Collecting six<2,>=1.9.0
Downloading six-1.16.0-py2.py3-none-any.whl (11 kB)
Collecting distlib<1,>=0.3.1
Downloading distlib-0.3.4-py2.py3-none-any.whl (461 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 461.2/461.2 KB 68.9 MB/s eta 0:00:00
Collecting platformdirs<3,>=2
Downloading platformdirs-2.5.2-py3-none-any.whl (14 kB)
Installing collected packages: nodeenv, distlib, toml, six, pyyaml, platformdirs, identify, filelock, cfgv, virtualenv, pre-commit
Successfully installed cfgv-3.3.1 distlib-0.3.4 filelock-3.6.0 identify-2.5.0 nodeenv-1.6.0 platformdirs-2.5.2 pre-commit-2.18.1 pyyaml-6.0 six-1.16.0 toml-0.10.2 virtualenv-20.14.1
```
</details>
<details>
<summary><em>pre-commit autoupdate || (exit 0);</em></summary>
```Shell
Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports.
already up to date.
Updating https://github.com/jendrikseipp/vulture ... [INFO] Initializing environment for https://github.com/jendrikseipp/vulture.
already up to date.
Updating https://github.com/python/black ... [INFO] Initializing environment for https://github.com/python/black.
already up to date.
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell.
already up to date.
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
updating v4.1.0 -> v4.2.0.
Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint.
already up to date.
Updating https://github.com/PyCQA/flake8 ... [INFO] Initializing environment for https://github.com/PyCQA/flake8.
already up to date.
Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort.
already up to date.
Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade.
updating v2.31.1 -> v2.32.0.
Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks.
already up to date.
Updating https://github.com/sphinx-contrib/sphinx-lint ... [INFO] Initializing environment for https://github.com/sphinx-contrib/sphinx-lint.
updating v0.2 -> v0.4.1.
Updating https://github.com/asottile/yesqa ... [INFO] Initializing environment for https://github.com/asottile/yesqa.
already up to date.
```
</details>
<details>
<summary><em>pre-commit run -a || (exit 0);</em></summary>
```Shell
[INFO] Initializing environment for https://github.com/PyCQA/flake8:flake8-bugbear==21.3.2,flake8-comprehensions==3.7.0,flake8==4.0.1,pandas-dev-flaker==0.5.0.
[INFO] Initializing environment for https://github.com/asottile/yesqa:flake8-bugbear==21.3.2,flake8-comprehensions==3.7.0,flake8==4.0.1,pandas-dev-flaker==0.5.0.
[INFO] Initializing environment for local:pyright@1.1.230.
[INFO] Initializing environment for local:flake8-rst==0.7.0,flake8==3.7.9.
[INFO] Initializing environment for local:pyyaml,toml.
[INFO] Initializing environment for local:pyyaml.
[INFO] Initializing environment for local.
[INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/jendrikseipp/vulture.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/python/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/codespell-project/codespell.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/cpplint/cpplint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/isort.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/pyupgrade.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/sphinx-contrib/sphinx-lint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/yesqa.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
absolufy-imports........................................................................................Passed
vulture.................................................................................................Passed
black...................................................................................................Passed
codespell...............................................................................................Passed
debug statements (python)...............................................................................Passed
fix end of files........................................................................................Passed
trim trailing whitespace................................................................................Passed
cpplint.................................................................................................Passed
flake8..................................................................................................Passed
isort...................................................................................................Passed
pyupgrade...............................................................................................Passed
rst ``code`` is two backticks...........................................................................Passed
rst directives end with two colons......................................................................Passed
rst ``inline code`` next to normal text.................................................................Passed
Sphinx lint.............................................................................................Passed
Strip unnecessary `# noqa`s.............................................................................Passed
flake8-rst..............................................................................................Failed
- hook id: flake8-rst
- exit code: 1
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 486, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 212, in build_ast
return compile("".join(self.lines), "", "exec", PyCF_ONLY_AST)
File "", line 35
df.plot.<TAB> # noqa: E225, E999
^
SyntaxError: invalid syntax
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/bin/flake8-rst", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8_rst/cli.py", line 16, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 393, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 381, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 300, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 331, in run
self.run_serial()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 315, in run_serial
checker.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 598, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 488, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 473, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 486, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 212, in build_ast
return compile("".join(self.lines), "", "exec", PyCF_ONLY_AST)
File "", line 203
df.foo<TAB> # noqa: E225, E999
^^^^^^^^^^^^^^^^^^
SyntaxError: invalid syntax
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/bin/flake8-rst", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8_rst/cli.py", line 16, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 393, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 381, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 300, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 331, in run
self.run_serial()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 315, in run_serial
checker.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 598, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 488, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 473, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
Unwanted patterns.......................................................................................Passed
Check Cython casting is `<type>obj`, not `<type> obj`...................................................Passed
Check for backticks incorrectly rendering because of missing spaces.....................................Passed
Check for unnecessary random seeds in asv benchmarks....................................................Passed
Check for usage of numpy testing or array_equal.........................................................Passed
Check for invalid EA testing............................................................................Passed
Generate pip dependency from conda......................................................................Passed
Check flake8 version is synced across flake8, yesqa, and environment.yml................................Passed
Validate correct capitalization among titles in documentation...........................................Passed
Import pandas.array as pd_array in core.................................................................Passed
Use pandas.io.common.urlopen instead of urllib.request.urlopen..........................................Passed
Use bool_t instead of bool in pandas/core/generic.py....................................................Passed
Ensure pandas errors are documented in doc/source/reference/testing.rst.................................Passed
Check for pg8000 not installed on CI for test_pg8000_sqlalchemy_passthrough_error.......................Passed
Check minimum version of dependencies are aligned.......................................................Passed
```
</details>
</details>
## Changed files
<details>
<summary>Changed file: </summary>
- .pre-commit-config.yaml
</details>
<hr>
[:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action) | https://api.github.com/repos/pandas-dev/pandas/pulls/46915 | 2022-05-01T07:08:27Z | 2022-05-01T09:15:29Z | 2022-05-01T09:15:29Z | 2022-05-07T03:09:21Z |
TYP: fix type annotation in _has_externally_shared_axis | diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 30af4f90d6869..bfbf77e85afd3 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -328,13 +328,13 @@ def _remove_labels_from_axis(axis: Axis):
axis.get_label().set_visible(False)
-def _has_externally_shared_axis(ax1: matplotlib.axes, compare_axis: str) -> bool:
+def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool:
"""
Return whether an axis is externally shared.
Parameters
----------
- ax1 : matplotlib.axes
+ ax1 : matplotlib.axes.Axes
Axis to query.
compare_axis : str
`"x"` or `"y"` according to whether the X-axis or Y-axis is being
| `from matplotlib import axes` is a module, not the axes class. | https://api.github.com/repos/pandas-dev/pandas/pulls/46914 | 2022-05-01T04:17:25Z | 2022-05-07T02:29:24Z | 2022-05-07T02:29:24Z | 2022-05-26T01:59:20Z |
CI: More targeted pyarrow version testing | diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index 8a16ef4020b14..f5cbb0e88ff11 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -28,7 +28,7 @@ jobs:
pattern: ["not single_cpu", "single_cpu"]
# Don't test pyarrow v2/3: Causes timeouts in read_csv engine
# even if tests are skipped/xfailed
- pyarrow_version: ["5", "7"]
+ pyarrow_version: ["5", "6", "7"]
include:
- name: "Downstream Compat"
env_file: actions-38-downstream_compat.yaml
@@ -62,6 +62,15 @@ jobs:
pattern: "not slow and not network and not single_cpu"
pandas_testing_mode: "deprecate"
test_args: "-W error::DeprecationWarning:numpy"
+ exclude:
+ - env_file: actions-39.yaml
+ pyarrow_version: "6"
+ - env_file: actions-39.yaml
+ pyarrow_version: "7"
+ - env_file: actions-310.yaml
+ pyarrow_version: "6"
+ - env_file: actions-310.yaml
+ pyarrow_version: "7"
fail-fast: false
name: ${{ matrix.name || format('{0} pyarrow={1} {2}', matrix.env_file, matrix.pyarrow_version, matrix.pattern) }}
env:
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
Hoping to supersede https://github.com/pandas-dev/pandas/pull/46386. cc @lithomas1
While the job queue bandwidth is larger now, probably don't need to test different pyarrow versions with different Python versions. Instead:
* PY38 will test pyarrow 5, 6, 7
* PY39 and PY310 will just test pyarrow 5 | https://api.github.com/repos/pandas-dev/pandas/pulls/46912 | 2022-05-01T00:56:14Z | 2022-05-02T14:59:26Z | 2022-05-02T14:59:25Z | 2022-05-02T21:37:08Z |
BUG: added finalize to merge, GH28283 | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 931d18dc349f3..2d20951a5e8a2 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -659,7 +659,7 @@ Metadata
^^^^^^^^
- Fixed metadata propagation in :meth:`DataFrame.melt` (:issue:`28283`)
- Fixed metadata propagation in :meth:`DataFrame.explode` (:issue:`28283`)
--
+- Fixed metadata propagation in :meth:`DataFrame.merge` (:issue:`28283`)
Other
^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ef5e6dd1d6757..1314e55988e7f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9708,7 +9708,7 @@ def merge(
copy=copy,
indicator=indicator,
validate=validate,
- )
+ ).__finalize__(self, method="merge")
def round(
self, decimals: int | dict[IndexLabel, int] | Series = 0, *args, **kwargs
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 540a30e691d3c..b65fe502a6d9a 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -184,13 +184,10 @@
"ignore:.*append method is deprecated.*:FutureWarning"
),
),
- pytest.param(
- (
- pd.DataFrame,
- frame_data,
- operator.methodcaller("merge", pd.DataFrame({"A": [1]})),
- ),
- marks=not_implemented_mark,
+ (
+ pd.DataFrame,
+ frame_data,
+ operator.methodcaller("merge", pd.DataFrame({"A": [1]})),
),
pytest.param(
(pd.DataFrame, frame_data, operator.methodcaller("round", 2)),
| Progress towards #28283
This PR gives the `merge` method the ability to propagate metadata using `__finalize__`.
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46911 | 2022-05-01T00:55:13Z | 2022-05-01T03:21:32Z | null | 2022-05-01T03:21:32Z |
DOC: update shortened link to full (#46899) | diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 1904ce32f3170..7421645baa463 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -434,9 +434,9 @@ def _str_rstrip(self, to_strip=None):
return self._str_map(lambda x: x.rstrip(to_strip))
def _str_removeprefix(self, prefix: str) -> Series:
- # outstanding question on whether to use native methods for users
- # on Python 3.9+ https://bit.ly/3LuMeRn, in which case we could do
- # return self._str_map(str.removeprefix)
+ # outstanding question on whether to use native methods for users on Python 3.9+
+ # https://github.com/pandas-dev/pandas/pull/39226#issuecomment-836719770,
+ # in which case we could do return self._str_map(str.removeprefix)
def removeprefix(text: str) -> str:
if text.startswith(prefix):
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46907 | 2022-04-30T16:13:27Z | 2022-04-30T18:37:20Z | 2022-04-30T18:37:20Z | 2022-04-30T18:37:37Z |
DEPR: numeric_only default in DataFrame methods with None/True | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 931d18dc349f3..aa42d4236484b 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -120,7 +120,7 @@ Other enhancements
- :meth:`DataFrame.reset_index` now accepts a ``names`` argument which renames the index names (:issue:`6878`)
- :meth:`pd.concat` now raises when ``levels`` is given but ``keys`` is None (:issue:`46653`)
- :meth:`pd.concat` now raises when ``levels`` contains duplicate values (:issue:`46653`)
-- Added ``numeric_only`` argument to :meth:`DataFrame.corr`, :meth:`DataFrame.corrwith`, :meth:`DataFrame.cov`, :meth:`DataFrame.idxmin`, :meth:`DataFrame.idxmax`, :meth:`.GroupBy.idxmin`, :meth:`.GroupBy.idxmax`, :meth:`.GroupBy.var`, :meth:`.GroupBy.std`, :meth:`.GroupBy.sem`, and :meth:`.GroupBy.quantile` (:issue:`46560`)
+- Added ``numeric_only`` argument to :meth:`DataFrame.corr`, :meth:`DataFrame.corrwith`, :meth:`DataFrame.cov`, :meth:`DataFrame.idxmin`, :meth:`DataFrame.idxmax`, :meth:`.DataFrameGroupBy.idxmin`, :meth:`.DataFrameGroupBy.idxmax`, :meth:`.GroupBy.var`, :meth:`.GroupBy.std`, :meth:`.GroupBy.sem`, and :meth:`.DataFrameGroupBy.quantile` (:issue:`46560`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`, :issue:`46725`)
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
@@ -426,6 +426,48 @@ As ``group_keys=True`` is the default value of :meth:`DataFrame.groupby` and
raise a ``FutureWarning``. This can be silenced and the previous behavior
retained by specifying ``group_keys=False``.
+.. _whatsnew_150.deprecations.numeric_only_default:
+
+``numeric_only`` default value
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Across the DataFrame operations such as ``min``, ``sum``, and ``idxmax``, the default
+value of the ``numeric_only`` argument, if it exists at all, was inconsistent.
+Furthermore, operations with the default value ``None`` can lead to surprising
+results. (:issue:`46560`)
+
+.. code-block:: ipython
+
+ In [1]: df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
+
+ In [2]: # Reading the next line without knowing the contents of df, one would
+ # expect the result to contain the products for both columns a and b.
+ df[["a", "b"]].prod()
+ Out[2]:
+ a 2
+ dtype: int64
+
+To avoid this behavior, the specifying the value ``numeric_only=None`` has been
+deprecated, and will be removed in a future version of pandas. In the future,
+all operations with a ``numeric_only`` argument will default to ``False``. Users
+should either call the operation only with columns that can be operated on, or
+specify ``numeric_only=True`` to operate only on Boolean, integer, and float columns.
+
+In order to support the transition to the new behavior, the following methods have
+gained the ``numeric_only`` argument.
+
+- :meth:`DataFrame.corr`
+- :meth:`DataFrame.corrwith`
+- :meth:`DataFrame.cov`
+- :meth:`DataFrame.idxmin`
+- :meth:`DataFrame.idxmax`
+- :meth:`.DataFrameGroupBy.idxmin`
+- :meth:`.DataFrameGroupBy.idxmax`
+- :meth:`.GroupBy.var`
+- :meth:`.GroupBy.std`
+- :meth:`.GroupBy.sem`
+- :meth:`.DataFrameGroupBy.quantile`
+
.. _whatsnew_150.deprecations.other:
Other Deprecations
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 90f665362ef56..098b501cc95c9 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -635,3 +635,63 @@ def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
list of column names with the None values replaced.
"""
return [f"level_{i}" if name is None else name for i, name in enumerate(names)]
+
+
+def resolve_numeric_only(numeric_only: bool | None | lib.NoDefault) -> bool:
+ """Determine the Boolean value of numeric_only.
+
+ See GH#46560 for details on the deprecation.
+
+ Parameters
+ ----------
+ numeric_only : bool, None, or lib.no_default
+ Value passed to the method.
+
+ Returns
+ -------
+ Resolved value of numeric_only.
+ """
+ if numeric_only is lib.no_default:
+ # Methods that behave like numeric_only=True and only got the numeric_only
+ # arg in 1.5.0 default to lib.no_default
+ result = True
+ elif numeric_only is None:
+ # Methods that had the numeric_only arg prior to 1.5.0 and try all columns
+ # first default to None
+ result = False
+ else:
+ result = cast(bool, numeric_only)
+ return result
+
+
+def deprecate_numeric_only_default(cls: type, name: str, deprecate_none: bool = False):
+ """Emit FutureWarning message for deprecation of numeric_only.
+
+ See GH#46560 for details on the deprecation.
+
+ Parameters
+ ----------
+ cls : type
+ pandas type that is generating the warning.
+ name : str
+ Name of the method that is generating the warning.
+ deprecate_none : bool, default False
+ Whether to also warn about the deprecation of specifying ``numeric_only=None``.
+ """
+ if name in ["all", "any"]:
+ arg_name = "bool_only"
+ else:
+ arg_name = "numeric_only"
+
+ msg = (
+ f"The default value of {arg_name} in {cls.__name__}.{name} is "
+ "deprecated. In a future version, it will default to False. "
+ )
+ if deprecate_none:
+ msg += f"In addition, specifying '{arg_name}=None' is deprecated. "
+ msg += (
+ f"Select only valid columns or specify the value of {arg_name} to silence "
+ "this warning."
+ )
+
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ef5e6dd1d6757..84ea8df0b9b20 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9833,7 +9833,7 @@ def corr(
self,
method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
min_periods: int = 1,
- numeric_only: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
@@ -9859,6 +9859,10 @@ def corr(
.. versionadded:: 1.5.0
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
Returns
-------
DataFrame
@@ -9897,10 +9901,11 @@ def corr(
dogs 1.0 NaN
cats NaN 1.0
""" # noqa:E501
- if numeric_only:
- data = self._get_numeric_data()
- else:
- data = self
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
+ data = self._get_numeric_data() if numeric_only_bool else self
+ if numeric_only is lib.no_default and len(data.columns) < len(self.columns):
+ com.deprecate_numeric_only_default(type(self), "corr")
+
cols = data.columns
idx = cols.copy()
mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
@@ -9946,7 +9951,7 @@ def cov(
self,
min_periods: int | None = None,
ddof: int | None = 1,
- numeric_only: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
@@ -9983,6 +9988,10 @@ def cov(
.. versionadded:: 1.5.0
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
Returns
-------
DataFrame
@@ -10051,10 +10060,11 @@ def cov(
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
- if numeric_only:
- data = self._get_numeric_data()
- else:
- data = self
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
+ data = self._get_numeric_data() if numeric_only_bool else self
+ if numeric_only is lib.no_default and len(data.columns) < len(self.columns):
+ com.deprecate_numeric_only_default(type(self), "cov")
+
cols = data.columns
idx = cols.copy()
mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
@@ -10077,7 +10087,7 @@ def corrwith(
axis: Axis = 0,
drop=False,
method="pearson",
- numeric_only: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
) -> Series:
"""
Compute pairwise correlation.
@@ -10110,6 +10120,10 @@ def corrwith(
.. versionadded:: 1.5.0
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
Returns
-------
Series
@@ -10141,10 +10155,10 @@ def corrwith(
dtype: float64
""" # noqa:E501
axis = self._get_axis_number(axis)
- if numeric_only:
- this = self._get_numeric_data()
- else:
- this = self
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
+ this = self._get_numeric_data() if numeric_only_bool else self
+ if numeric_only is lib.no_default and len(this.columns) < len(self.columns):
+ com.deprecate_numeric_only_default(type(self), "corrwith")
# GH46174: when other is a Series object and axis=0, we achieve a speedup over
# passing .corr() to .apply() by taking the columns as ndarrays and iterating
@@ -10396,7 +10410,6 @@ def _reduce(
filter_type=None,
**kwds,
):
-
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
@@ -10451,6 +10464,7 @@ def _get_data() -> DataFrame:
data = self._get_bool_data()
return data
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
if numeric_only is not None or axis == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
@@ -10458,7 +10472,7 @@ def _get_data() -> DataFrame:
# dtypes are unambiguous can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
- if numeric_only is True:
+ if numeric_only_bool:
df = _get_data()
if axis == 1:
df = df.T
@@ -10479,16 +10493,8 @@ def _get_data() -> DataFrame:
if numeric_only is None and out.shape[0] != df.shape[1]:
# columns have been dropped GH#41480
- arg_name = "numeric_only"
- if name in ["all", "any"]:
- arg_name = "bool_only"
- warnings.warn(
- "Dropping of nuisance columns in DataFrame reductions "
- f"(with '{arg_name}=None') is deprecated; in a future "
- "version this will raise TypeError. Select only valid "
- "columns before calling the reduction.",
- FutureWarning,
- stacklevel=find_stack_level(),
+ com.deprecate_numeric_only_default(
+ type(self), name, deprecate_none=True
)
return out
@@ -10776,6 +10782,11 @@ def quantile(
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
+
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
@@ -10833,15 +10844,8 @@ def quantile(
axis = self._get_axis_number(axis)
any_not_numeric = any(not is_numeric_dtype(x) for x in self.dtypes)
if numeric_only is no_default and any_not_numeric:
- warnings.warn(
- "In future versions of pandas, numeric_only will be set to "
- "False by default, and the datetime/timedelta columns will "
- "be considered in the results. To not consider these columns"
- "specify numeric_only=True.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- numeric_only = True
+ com.deprecate_numeric_only_default(type(self), "quantile")
+ numeric_only = com.resolve_numeric_only(numeric_only)
if not is_list_like(q):
# BlockManager.quantile expects listlike, so we wrap and unwrap here
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c615216240d60..1a31a50606c2c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11554,6 +11554,11 @@ def _doc_params(cls):
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
+
+ .. deprecated:: 1.5.0
+ Specifying ``numeric_only=None`` is deprecated. The default value will be
+ ``False`` in a future version of pandas.
+
{min_count}\
**kwargs
Additional keyword arguments to be passed to the function.
@@ -11584,6 +11589,10 @@ def _doc_params(cls):
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
+ .. deprecated:: 1.5.0
+ Specifying ``numeric_only=None`` is deprecated. The default value will be
+ ``False`` in a future version of pandas.
+
Returns
-------
{name1} or {name2} (if level specified) \
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 3a86aa05fb227..2f0a4195d2f74 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -41,7 +41,10 @@ def test_cov(self, float_frame, float_string_frame):
tm.assert_almost_equal(result["A"]["C"], expected)
# exclude non-numeric types
- result = float_string_frame.cov()
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = float_string_frame.cov()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
tm.assert_frame_equal(result, expected)
@@ -116,7 +119,10 @@ def test_corr_scipy_method(self, float_frame, method):
def test_corr_non_numeric(self, float_string_frame):
# exclude non-numeric types
- result = float_string_frame.corr()
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = float_string_frame.corr()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
tm.assert_frame_equal(result, expected)
@@ -307,11 +313,17 @@ def test_corrwith_with_objects(self):
df1["obj"] = "foo"
df2["obj"] = "bar"
- result = df1.corrwith(df2)
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
- result = df1.corrwith(df2, axis=1)
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 41deeec7c4b57..7f2a13862f4ed 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1,11 +1,13 @@
from datetime import timedelta
from decimal import Decimal
+import inspect
import re
from dateutil.tz import tzlocal
import numpy as np
import pytest
+from pandas._libs import lib
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
@@ -1752,7 +1754,9 @@ def test_groupby_regular_arithmetic_equivalent(meth):
def test_frame_mixed_numeric_object_with_timestamp(ts_value):
# GH 13912
df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": [ts_value]})
- with tm.assert_produces_warning(FutureWarning, match="Dropping of nuisance"):
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
result = df.sum()
expected = Series([1, 1.1, "foo"], index=list("abc"))
tm.assert_series_equal(result, expected)
@@ -1786,3 +1790,60 @@ def test_reduction_axis_none_deprecation(method):
expected = meth()
tm.assert_series_equal(res, expected)
tm.assert_series_equal(res, meth(axis=0))
+
+
+@pytest.mark.parametrize(
+ "kernel",
+ [
+ "corr",
+ "corrwith",
+ "count",
+ "cov",
+ "idxmax",
+ "idxmin",
+ "kurt",
+ "kurt",
+ "max",
+ "mean",
+ "median",
+ "min",
+ "mode",
+ "prod",
+ "prod",
+ "quantile",
+ "sem",
+ "skew",
+ "std",
+ "sum",
+ "var",
+ ],
+)
+def test_numeric_only_deprecation(kernel):
+ # GH#46852
+ df = DataFrame({"a": [1, 2, 3], "b": object})
+ args = (df,) if kernel == "corrwith" else ()
+ signature = inspect.signature(getattr(DataFrame, kernel))
+ default = signature.parameters["numeric_only"].default
+ assert default is not True
+
+ if kernel in ("idxmax", "idxmin"):
+ # kernels that default to numeric_only=False and fail on nuisance columns
+ assert default is False
+ with pytest.raises(TypeError, match="not allowed for this dtype"):
+ getattr(df, kernel)(*args)
+ else:
+ if default is None or default is lib.no_default:
+ expected = getattr(df[["a"]], kernel)(*args)
+ warn = FutureWarning
+ else:
+ # default must be False and works on any nuisance columns
+ expected = getattr(df, kernel)(*args)
+ if kernel == "mode":
+ assert "b" in expected.columns
+ else:
+ assert "b" in expected.index
+ warn = None
+ msg = f"The default value of numeric_only in DataFrame.{kernel}"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = getattr(df, kernel)(*args)
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index a5834dd237c01..b5bae4759090a 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -807,7 +807,13 @@ def test_frame_downsample_method(method, numeric_only, expected_data):
resampled = df.resample("Y")
func = getattr(resampled, method)
- result = func(numeric_only=numeric_only)
+ if method == "prod" and numeric_only is not True:
+ warn = FutureWarning
+ else:
+ warn = None
+ msg = "Dropping invalid columns in DataFrameGroupBy.prod is deprecated"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = func(numeric_only=numeric_only)
expected = DataFrame(expected_data, index=expected_index)
tm.assert_frame_equal(result, expected)
| - [x] closes #46852 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
groupby ops are up next for deprecation, then onto resample and window. | https://api.github.com/repos/pandas-dev/pandas/pulls/46906 | 2022-04-30T15:47:33Z | 2022-05-05T01:22:56Z | 2022-05-05T01:22:55Z | 2022-07-26T20:44:02Z |
TYP/CI: bump mypy&pyright | diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index d4a2bedcfba1a..7d24b26f5538b 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -74,7 +74,7 @@ jobs:
- name: Install pyright
# note: keep version in sync with .pre-commit-config.yaml
- run: npm install -g pyright@1.1.245
+ run: npm install -g pyright@1.1.247
- name: Build Pandas
id: build
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fac09fcf70511..9469a34c8aacd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -89,7 +89,7 @@ repos:
types: [python]
stages: [manual]
# note: keep version in sync with .github/workflows/code-checks.yml
- additional_dependencies: ['pyright@1.1.245']
+ additional_dependencies: ['pyright@1.1.247']
- repo: local
hooks:
- id: flake8-rst
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 9f1c4755bc54f..128fd68674f96 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -360,7 +360,7 @@ If installed, we now require:
+-----------------+-----------------+----------+---------+
| Package | Minimum Version | Required | Changed |
+=================+=================+==========+=========+
-| mypy (dev) | 0.941 | | X |
+| mypy (dev) | 0.950 | | X |
+-----------------+-----------------+----------+---------+
diff --git a/environment.yml b/environment.yml
index dc3cba3be2132..b4710e252384c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -24,7 +24,7 @@ dependencies:
- flake8-bugbear=21.3.2 # used by flake8, find likely bugs
- flake8-comprehensions=3.7.0 # used by flake8, linting of unnecessary comprehensions
- isort>=5.2.1 # check that imports are in the right order
- - mypy=0.941
+ - mypy=0.950
- pre-commit>=2.9.2
- pycodestyle # used by flake8
- pyupgrade
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index 2a487fa4b6877..2e1ef31033d71 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -45,7 +45,9 @@ def set_locale(
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if all(x is not None for x in normalized_locale):
- yield ".".join(normalized_locale)
+ # error: Argument 1 to "join" of "str" has incompatible type
+ # "Tuple[Optional[str], Optional[str]]"; expected "Iterable[str]"
+ yield ".".join(normalized_locale) # type: ignore[arg-type]
else:
yield new_locale
finally:
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index 13daba5cfcbdf..4be9621a594dc 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -113,7 +113,7 @@ class Timestamp(datetime):
def time(self) -> _time: ...
def timetz(self) -> _time: ...
def replace(
- self,
+ self: _DatetimeT,
year: int = ...,
month: int = ...,
day: int = ...,
@@ -123,7 +123,7 @@ class Timestamp(datetime):
microsecond: int = ...,
tzinfo: _tzinfo | None = ...,
fold: int = ...,
- ) -> datetime: ...
+ ) -> _DatetimeT: ...
def astimezone(self: _DatetimeT, tz: _tzinfo | None = ...) -> _DatetimeT: ...
def ctime(self) -> str: ...
def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index a7bb9520841b6..3616e3512c6fe 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -957,7 +957,7 @@ def equals(self, other) -> bool:
return array_equivalent(left, right, dtype_equal=True)
def _quantile(
- self: BaseMaskedArrayT, qs: npt.NDArray[np.float64], interpolation: str
+ self, qs: npt.NDArray[np.float64], interpolation: str
) -> BaseMaskedArray:
"""
Dispatch to quantile_with_mask, needed because we do not have
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 2851ea36c8a33..97ec297db8ba8 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -7,6 +7,7 @@
from enum import Enum
import itertools
from typing import (
+ Any,
Callable,
DefaultDict,
Hashable,
@@ -1027,26 +1028,14 @@ def _get_empty_meta(
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
+ dtype_dict: defaultdict[Hashable, Any]
if not is_dict_like(dtype):
# if dtype == None, default will be object.
default_dtype = dtype or object
- # error: Argument 1 to "defaultdict" has incompatible type "Callable[[],
- # Union[ExtensionDtype, str, dtype[Any], Type[object], Dict[Hashable,
- # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float],
- # Type[int], Type[complex], Type[bool], Type[object]]]]]"; expected
- # "Optional[Callable[[], Union[ExtensionDtype, str, dtype[Any],
- # Type[object]]]]"
- # error: Incompatible return value type (got "Union[ExtensionDtype, str,
- # dtype[Any], Type[object], Dict[Hashable, Union[ExtensionDtype, Union[str,
- # dtype[Any]], Type[str], Type[float], Type[int], Type[complex], Type[bool],
- # Type[object]]]]", expected "Union[ExtensionDtype, str, dtype[Any],
- # Type[object]]")
- dtype = defaultdict(
- lambda: default_dtype # type: ignore[arg-type, return-value]
- )
+ dtype_dict = defaultdict(lambda: default_dtype)
else:
dtype = cast(dict, dtype)
- dtype = defaultdict(
+ dtype_dict = defaultdict(
lambda: object,
{columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
)
@@ -1063,14 +1052,16 @@ def _get_empty_meta(
if (index_col is None or index_col is False) or index_names is None:
index = Index([])
else:
- data = [Series([], dtype=dtype[name]) for name in index_names]
+ data = [Series([], dtype=dtype_dict[name]) for name in index_names]
index = ensure_index_from_sequences(data, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
- col_dict = {col_name: Series([], dtype=dtype[col_name]) for col_name in columns}
+ col_dict = {
+ col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns
+ }
return index, columns, col_dict
diff --git a/requirements-dev.txt b/requirements-dev.txt
index a3f71ac2a3aa5..0f1d76b996df1 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -12,7 +12,7 @@ flake8==4.0.1
flake8-bugbear==21.3.2
flake8-comprehensions==3.7.0
isort>=5.2.1
-mypy==0.941
+mypy==0.950
pre-commit>=2.9.2
pycodestyle
pyupgrade
| Mypy aims to have a monthly release. | https://api.github.com/repos/pandas-dev/pandas/pulls/46905 | 2022-04-30T13:08:54Z | 2022-05-15T15:33:17Z | 2022-05-15T15:33:17Z | 2022-05-30T07:02:51Z |
BUG: DatetimeIndex.resolution with nanosecond reso | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 128fd68674f96..4e2f547d7d2dc 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -553,6 +553,8 @@ Other Deprecations
- Deprecated the ``closed`` argument in :meth:`interval_range` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
- Deprecated the methods :meth:`DataFrame.mad`, :meth:`Series.mad`, and the corresponding groupby methods (:issue:`11787`)
- Deprecated positional arguments to :meth:`Index.join` except for ``other``, use keyword-only arguments instead of positional arguments (:issue:`46518`)
+- Deprecated indexing on a timezone-naive :class:`DatetimeIndex` using a string representing a timezone-aware datetime (:issue:`46903`, :issue:`36148`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_150.performance:
@@ -594,6 +596,7 @@ Datetimelike
- Bug in :meth:`Index.astype` when casting from object dtype to ``timedelta64[ns]`` dtype incorrectly casting ``np.datetime64("NaT")`` values to ``np.timedelta64("NaT")`` instead of raising (:issue:`45722`)
- Bug in :meth:`SeriesGroupBy.value_counts` index when passing categorical column (:issue:`44324`)
- Bug in :meth:`DatetimeIndex.tz_localize` localizing to UTC failing to make a copy of the underlying data (:issue:`46460`)
+- Bug in :meth:`DatetimeIndex.resolution` incorrectly returning "day" instead of "nanosecond" for nanosecond-resolution indexes (:issue:`46903`)
-
Timedelta
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 700f66840f128..9492888e7db77 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -60,6 +60,10 @@ from pandas._libs.tslibs.nattype cimport (
)
from pandas._libs.tslibs.timestamps cimport _Timestamp
+from pandas._libs.tslibs import (
+ Resolution,
+ get_resolution,
+)
from pandas._libs.tslibs.timestamps import Timestamp
# Note: this is the only non-tslibs intra-pandas dependency here
@@ -122,11 +126,11 @@ def format_array_from_datetime(
"""
cdef:
int64_t val, ns, N = len(values)
- ndarray[int64_t] consider_values
bint show_ms = False, show_us = False, show_ns = False
bint basic_format = False
ndarray[object] result = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
- object ts, res
+ _Timestamp ts
+ str res
npy_datetimestruct dts
if na_rep is None:
@@ -136,16 +140,10 @@ def format_array_from_datetime(
# a format based on precision
basic_format = format is None and tz is None
if basic_format:
- consider_values = values[values != NPY_NAT]
- show_ns = (consider_values % 1000).any()
-
- if not show_ns:
- consider_values //= 1000
- show_us = (consider_values % 1000).any()
-
- if not show_ms:
- consider_values //= 1000
- show_ms = (consider_values % 1000).any()
+ reso_obj = get_resolution(values)
+ show_ns = reso_obj == Resolution.RESO_NS
+ show_us = reso_obj == Resolution.RESO_US
+ show_ms = reso_obj == Resolution.RESO_MS
for i in range(N):
val = values[i]
@@ -178,6 +176,7 @@ def format_array_from_datetime(
# invalid format string
# requires dates > 1900
try:
+ # Note: dispatches to pydatetime
result[i] = ts.strftime(format)
except ValueError:
result[i] = str(ts)
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index 31d0579900abd..511ce26feeefa 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -204,7 +204,9 @@ def ints_to_pydatetime(
cdef inline c_Resolution _reso_stamp(npy_datetimestruct *dts):
- if dts.us != 0:
+ if dts.ps != 0:
+ return c_Resolution.RESO_NS
+ elif dts.us != 0:
if dts.us % 1000 == 0:
return c_Resolution.RESO_MS
return c_Resolution.RESO_US
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 5274f68eb3171..806d081c0176b 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -593,7 +593,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
end = self._maybe_cast_for_get_loc(end)
return start, end
- def _deprecate_mismatched_indexing(self, key) -> None:
+ def _deprecate_mismatched_indexing(self, key, one_way: bool = False) -> None:
# GH#36148
# we get here with isinstance(key, self._data._recognized_scalars)
try:
@@ -606,6 +606,10 @@ def _deprecate_mismatched_indexing(self, key) -> None:
"raise KeyError in a future version. "
"Use a timezone-naive object instead."
)
+ elif one_way:
+ # we special-case timezone-naive strings and timezone-aware
+ # DatetimeIndex
+ return
else:
msg = (
"Indexing a timezone-aware DatetimeIndex with a "
@@ -640,6 +644,7 @@ def get_loc(self, key, method=None, tolerance=None):
parsed, reso = self._parse_with_reso(key)
except ValueError as err:
raise KeyError(key) from err
+ self._deprecate_mismatched_indexing(parsed, one_way=True)
if self._can_partial_date_slice(reso):
try:
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 332ab02255911..8d498b59c55d1 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -10,6 +10,22 @@
class TestDatetimeIndex:
+ def test_get_loc_naive_dti_aware_str_deprecated(self):
+ # GH#46903
+ ts = Timestamp("20130101").value
+ dti = pd.DatetimeIndex([ts + 50 + i for i in range(100)])
+ ser = Series(range(100), index=dti)
+
+ key = "2013-01-01 00:00:00.000000050+0000"
+ msg = "Indexing a timezone-naive DatetimeIndex with a timezone-aware datetime"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = ser[key]
+ assert res == 0
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ loc = dti.get_loc(key)
+ assert loc == 0
+
def test_indexing_with_datetime_tz(self):
# GH#8260
diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py
index 5557322eae42d..4381aa3f34f8d 100644
--- a/pandas/tests/series/methods/test_asof.py
+++ b/pandas/tests/series/methods/test_asof.py
@@ -23,9 +23,12 @@ def test_asof_nanosecond_index_access(self):
first_value = ser.asof(ser.index[0])
+ # GH#46903 previously incorrectly was "day"
+ assert dti.resolution == "nanosecond"
+
# this used to not work bc parsing was done by dateutil that didn't
# handle nanoseconds
- assert first_value == ser["2013-01-01 00:00:00.000000050+0000"]
+ assert first_value == ser["2013-01-01 00:00:00.000000050"]
expected_ts = np.datetime64("2013-01-01 00:00:00.000000050", "ns")
assert first_value == ser[Timestamp(expected_ts)]
diff --git a/pandas/tests/tslibs/test_resolution.py b/pandas/tests/tslibs/test_resolution.py
new file mode 100644
index 0000000000000..15f4a9d032e5c
--- /dev/null
+++ b/pandas/tests/tslibs/test_resolution.py
@@ -0,0 +1,13 @@
+import numpy as np
+
+from pandas._libs.tslibs import (
+ Resolution,
+ get_resolution,
+)
+
+
+def test_get_resolution_nano():
+ # don't return the fallback RESO_DAY
+ arr = np.array([1], dtype=np.int64)
+ res = get_resolution(arr)
+ assert res == Resolution.RESO_NS
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46903 | 2022-04-30T02:14:50Z | 2022-05-19T13:12:04Z | 2022-05-19T13:12:03Z | 2022-05-19T15:12:06Z |
ENH: fields.get_start_end_field support non-nano | diff --git a/asv_bench/benchmarks/tslibs/fields.py b/asv_bench/benchmarks/tslibs/fields.py
index 23ae73811204c..203afcdaa7378 100644
--- a/asv_bench/benchmarks/tslibs/fields.py
+++ b/asv_bench/benchmarks/tslibs/fields.py
@@ -66,9 +66,9 @@ class TimeGetStartEndField:
def setup(self, size, side, period, freqstr, month_kw):
arr = np.random.randint(0, 10, size=size, dtype="i8")
- self.i8data = arr
+ self.dt64data = arr.view("M8[ns]")
self.attrname = f"is_{period}_{side}"
def time_get_start_end_field(self, size, side, period, freqstr, month_kw):
- get_start_end_field(self.i8data, self.attrname, freqstr, month_kw=month_kw)
+ get_start_end_field(self.dt64data, self.attrname, freqstr, month_kw=month_kw)
diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi
index 571a327b46df8..228f7dbdf5eac 100644
--- a/pandas/_libs/tslibs/fields.pyi
+++ b/pandas/_libs/tslibs/fields.pyi
@@ -12,7 +12,7 @@ def get_date_name_field(
locale: str | None = ...,
) -> npt.NDArray[np.object_]: ...
def get_start_end_field(
- dtindex: npt.NDArray[np.int64], # const int64_t[:]
+ dt64values: npt.NDArray[np.datetime64],
field: str,
freqstr: str | None = ...,
month_kw: int = ...,
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index cc82deec08a28..e8980dc1a7553 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -5,8 +5,10 @@ objects and arrays
from locale import LC_TIME
from _strptime import LocaleTime
+
cimport cython
from cython cimport Py_ssize_t
+
import numpy as np
cimport numpy as cnp
@@ -41,8 +43,11 @@ from pandas._libs.tslibs.ccalendar cimport (
)
from pandas._libs.tslibs.nattype cimport NPY_NAT
from pandas._libs.tslibs.np_datetime cimport (
+ NPY_DATETIMEUNIT,
dt64_to_dtstruct,
+ get_unit_from_dtype,
npy_datetimestruct,
+ pandas_datetime_to_datetimestruct,
pandas_timedeltastruct,
td64_to_tdstruct,
)
@@ -196,22 +201,35 @@ cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil:
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_start_end_field(const int64_t[:] dtindex, str field,
+def get_start_end_field(ndarray dt64values, str field,
str freqstr=None, int month_kw=12):
"""
Given an int64-based datetime index return array of indicators
of whether timestamps are at the start/end of the month/quarter/year
(defined by frequency).
+
+ Parameters
+ ----------
+ dt64values : ndarray[datetime64], any resolution
+ field : str
+ frestr : str or None, default None
+ month_kw : int, default 12
+
+ Returns
+ -------
+ ndarray[bool]
"""
cdef:
Py_ssize_t i
- int count = len(dtindex)
+ int count = dt64values.size
bint is_business = 0
int end_month = 12
int start_month = 1
ndarray[int8_t] out
npy_datetimestruct dts
int compare_month, modby
+ ndarray dtindex = dt64values.view("i8")
+ NPY_DATETIMEUNIT reso = get_unit_from_dtype(dt64values.dtype)
out = np.zeros(count, dtype='int8')
@@ -251,7 +269,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_firstbday(dts.year, dts.month)):
@@ -263,7 +281,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and dts.day == 1:
out[i] = 1
@@ -275,7 +293,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_lastbday(dts.year, dts.month)):
@@ -287,7 +305,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_days_in_month(dts.year, dts.month)):
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index db951027e5794..e7ac855d6a832 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -487,9 +487,7 @@ cdef class _Timestamp(ABCTimestamp):
dict kwds
ndarray[uint8_t, cast=True] out
int month_kw
-
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
+ str unit
if freq:
kwds = freq.kwds
@@ -500,7 +498,9 @@ cdef class _Timestamp(ABCTimestamp):
freqstr = None
val = self._maybe_convert_value_to_local()
- out = get_start_end_field(np.array([val], dtype=np.int64),
+
+ unit = npy_unit_to_abbrev(self._reso)
+ out = get_start_end_field(np.array([val], dtype=f"M8[{unit}]"),
field, freqstr, month_kw)
return out[0]
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 5c8c6d7fe23a3..1e409dc17a06d 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -130,7 +130,7 @@ def f(self):
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
- values, field, self.freqstr, month_kw
+ values.view(self._ndarray.dtype), field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index ab7bc4c7cb412..bc9e6c0131646 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -732,6 +732,27 @@ def test_non_nano_fields(self, dt64, ts):
assert ts.weekday() == alt.weekday()
assert ts.isoweekday() == alt.isoweekday()
+ def test_start_end_fields(self, ts):
+ assert ts.is_year_start
+ assert ts.is_quarter_start
+ assert ts.is_month_start
+ assert not ts.is_year_end
+ assert not ts.is_month_end
+ assert not ts.is_month_end
+
+ freq = offsets.BDay()
+ ts._set_freq(freq)
+
+ # 2016-01-01 is a Friday, so is year/quarter/month start with this freq
+ msg = "Timestamp.freq is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert ts.is_year_start
+ assert ts.is_quarter_start
+ assert ts.is_month_start
+ assert not ts.is_year_end
+ assert not ts.is_month_end
+ assert not ts.is_month_end
+
def test_repr(self, dt64, ts):
alt = Timestamp(dt64)
diff --git a/pandas/tests/tslibs/test_fields.py b/pandas/tests/tslibs/test_fields.py
index 9e6464f7727bd..528d08d7f499b 100644
--- a/pandas/tests/tslibs/test_fields.py
+++ b/pandas/tests/tslibs/test_fields.py
@@ -28,7 +28,10 @@ def test_get_date_field_readonly(dtindex):
def test_get_start_end_field_readonly(dtindex):
- result = fields.get_start_end_field(dtindex, "is_month_start", None)
+ dt64values = dtindex.view("M8[ns]")
+ dt64values.flags.writeable = False
+
+ result = fields.get_start_end_field(dt64values, "is_month_start", None)
expected = np.array([True, False, False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
diff --git a/setup.py b/setup.py
index 67b91c55dd397..bca919a3aa6f8 100755
--- a/setup.py
+++ b/setup.py
@@ -506,6 +506,7 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
"_libs.tslibs.fields": {
"pyxfile": "_libs/tslibs/fields",
"depends": tseries_depends,
+ "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
},
"_libs.tslibs.nattype": {"pyxfile": "_libs/tslibs/nattype"},
"_libs.tslibs.np_datetime": {
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46902 | 2022-04-30T01:36:32Z | 2022-05-05T22:08:23Z | 2022-05-05T22:08:23Z | 2022-05-05T22:14:20Z |
ENH: allow non-nano in DatetimeArray, TimedeltaArray._simple_new | diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index 7cbc1833093ba..b3a006141fadc 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -25,6 +25,7 @@
"Tick",
"BaseOffset",
"tz_compare",
+ "is_unitless",
]
from pandas._libs.tslibs import dtypes
@@ -39,6 +40,7 @@
from pandas._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
+ is_unitless,
)
from pandas._libs.tslibs.offsets import (
BaseOffset,
diff --git a/pandas/_libs/tslibs/np_datetime.pyi b/pandas/_libs/tslibs/np_datetime.pyi
index 59f4427125266..27871a78f8aaf 100644
--- a/pandas/_libs/tslibs/np_datetime.pyi
+++ b/pandas/_libs/tslibs/np_datetime.pyi
@@ -9,3 +9,4 @@ def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ...
def astype_overflowsafe(
arr: np.ndarray, dtype: np.dtype, copy: bool = ...
) -> np.ndarray: ...
+def is_unitless(dtype: np.dtype) -> bool: ...
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 07d198193464f..a787bded2bd50 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -92,6 +92,18 @@ def py_get_unit_from_dtype(dtype):
return get_unit_from_dtype(dtype)
+def is_unitless(dtype: cnp.dtype) -> bool:
+ """
+ Check if a datetime64 or timedelta64 dtype has no attached unit.
+ """
+ if dtype.type_num not in [cnp.NPY_DATETIME, cnp.NPY_TIMEDELTA]:
+ raise ValueError("is_unitless dtype must be datetime64 or timedelta64")
+ cdef:
+ NPY_DATETIMEUNIT unit = get_unit_from_dtype(dtype)
+
+ return unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC
+
+
# ----------------------------------------------------------------------
# Comparison
diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi
index a04104915cf1f..c547503bae273 100644
--- a/pandas/_libs/tslibs/timedeltas.pyi
+++ b/pandas/_libs/tslibs/timedeltas.pyi
@@ -88,6 +88,8 @@ class Timedelta(timedelta):
# GH 46171
# While Timedelta can return pd.NaT, having the constructor return
# a Union with NaTType makes things awkward for users of pandas
+ @classmethod
+ def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ...
@property
def days(self) -> int: ...
@property
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index e7f97413f6881..7fef934a85626 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -31,6 +31,7 @@
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
+ is_unitless,
normalize_i8_timestamps,
timezones,
to_offset,
@@ -335,7 +336,12 @@ def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
- assert values.dtype == DT64NS_DTYPE
+ assert dtype.kind == "M"
+ if isinstance(dtype, np.dtype):
+ # TODO: once non-nano DatetimeTZDtype is implemented, require that
+ # dtype's reso match values's reso
+ assert dtype == values.dtype
+ assert not is_unitless(dtype)
result = super()._simple_new(values, dtype)
result._freq = freq
@@ -761,7 +767,7 @@ def _add_offset(self, offset) -> DatetimeArray:
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
- result = DatetimeArray._simple_new(result)
+ result = DatetimeArray._simple_new(result, dtype=result.dtype)
result = result.tz_localize(self.tz)
except NotImplementedError:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 1f55842050df0..816f07b076ef8 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -29,6 +29,7 @@
precision_from_unit,
)
from pandas._libs.tslibs.fields import get_timedelta_field
+from pandas._libs.tslibs.np_datetime import py_get_unit_from_dtype
from pandas._libs.tslibs.timedeltas import (
array_to_timedelta64,
ints_to_pytimedelta,
@@ -40,6 +41,7 @@
npt,
)
from pandas.compat.numpy import function as nv
+from pandas.util._decorators import cache_readonly
from pandas.util._validators import validate_endpoints
from pandas.core.dtypes.astype import astype_td64_unit_conversion
@@ -154,8 +156,15 @@ class TimedeltaArray(dtl.TimelikeOps):
# Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
# operates pointwise.
+ @cache_readonly
+ def _reso(self):
+ return py_get_unit_from_dtype(self.dtype)
+
def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType:
- return Timedelta(x, unit="ns")
+ y = x.view("i8")
+ if y == NaT.value:
+ return NaT
+ return Timedelta._from_value_and_reso(y, reso=self._reso)
@property
# error: Return type "dtype" of "dtype" incompatible with return type
@@ -174,7 +183,7 @@ def dtype(self) -> np.dtype: # type: ignore[override]
-------
numpy.dtype
"""
- return TD64NS_DTYPE
+ return self._ndarray.dtype
# ----------------------------------------------------------------
# Constructors
@@ -244,11 +253,13 @@ def __init__(
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=TD64NS_DTYPE
) -> TimedeltaArray:
- assert dtype == TD64NS_DTYPE, dtype
+ # Require td64 dtype, not unit-less, matching values.dtype
+ assert isinstance(dtype, np.dtype) and dtype.kind == "m"
+ assert not tslibs.is_unitless(dtype)
assert isinstance(values, np.ndarray), type(values)
- assert values.dtype == TD64NS_DTYPE
+ assert dtype == values.dtype
- result = super()._simple_new(values=values, dtype=TD64NS_DTYPE)
+ result = super()._simple_new(values=values, dtype=dtype)
result._freq = freq
return result
@@ -262,7 +273,7 @@ def _from_sequence(
data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=None)
freq, _ = dtl.validate_inferred_freq(None, inferred_freq, False)
- return cls._simple_new(data, freq=freq)
+ return cls._simple_new(data, dtype=data.dtype, freq=freq)
@classmethod
def _from_sequence_not_strict(
@@ -286,7 +297,7 @@ def _from_sequence_not_strict(
if explicit_none:
freq = None
- result = cls._simple_new(data, freq=freq)
+ result = cls._simple_new(data, dtype=data.dtype, freq=freq)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
@@ -330,7 +341,8 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
if not right_closed:
index = index[:-1]
- return cls._simple_new(index.view("m8[ns]"), freq=freq)
+ td64values = index.view("m8[ns]")
+ return cls._simple_new(td64values, dtype=td64values.dtype, freq=freq)
# ----------------------------------------------------------------
# DatetimeLike Interface
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 9ea87be2a5468..8eb5cc2dd82f6 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -11,6 +11,17 @@
from pandas.core.arrays import DatetimeArray
+class TestNonNano:
+ @pytest.mark.parametrize("unit,reso", [("s", 7), ("ms", 8), ("us", 9)])
+ @pytest.mark.xfail(reason="_box_func is not yet patched to get reso right")
+ def test_non_nano(self, unit, reso):
+ arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")
+ dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
+
+ assert dta.dtype == arr.dtype
+ assert dta[0]._reso == reso
+
+
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
# sufficiently robust
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index bf3491496ab3a..46306167878f6 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -7,6 +7,16 @@
from pandas.core.arrays import TimedeltaArray
+class TestNonNano:
+ @pytest.mark.parametrize("unit,reso", [("s", 7), ("ms", 8), ("us", 9)])
+ def test_non_nano(self, unit, reso):
+ arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
+ tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)
+
+ assert tda.dtype == arr.dtype
+ assert tda[0]._reso == reso
+
+
class TestTimedeltaArray:
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype_int(self, dtype):
diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py
index 9655bb88c2fcf..273a7985ff50b 100644
--- a/pandas/tests/tslibs/test_api.py
+++ b/pandas/tests/tslibs/test_api.py
@@ -50,6 +50,7 @@ def test_namespace():
"tz_convert_from_utc_single",
"to_offset",
"tz_compare",
+ "is_unitless",
]
expected = set(submodules + api)
diff --git a/pandas/tests/tslibs/test_np_datetime.py b/pandas/tests/tslibs/test_np_datetime.py
index 336c7d30d5f77..31f48b9ad7c4a 100644
--- a/pandas/tests/tslibs/test_np_datetime.py
+++ b/pandas/tests/tslibs/test_np_datetime.py
@@ -4,6 +4,7 @@
from pandas._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
astype_overflowsafe,
+ is_unitless,
py_get_unit_from_dtype,
py_td64_to_tdstruct,
)
@@ -11,6 +12,28 @@
import pandas._testing as tm
+def test_is_unitless():
+ dtype = np.dtype("M8[ns]")
+ assert not is_unitless(dtype)
+
+ dtype = np.dtype("datetime64")
+ assert is_unitless(dtype)
+
+ dtype = np.dtype("m8[ns]")
+ assert not is_unitless(dtype)
+
+ dtype = np.dtype("timedelta64")
+ assert is_unitless(dtype)
+
+ msg = "dtype must be datetime64 or timedelta64"
+ with pytest.raises(ValueError, match=msg):
+ is_unitless(np.dtype(np.int64))
+
+ msg = "Argument 'dtype' has incorrect type"
+ with pytest.raises(TypeError, match=msg):
+ is_unitless("foo")
+
+
def test_get_unit_from_dtype():
# datetime64
assert py_get_unit_from_dtype(np.dtype("M8[Y]")) == 0
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46901 | 2022-04-30T01:26:17Z | 2022-05-06T21:20:03Z | 2022-05-06T21:20:03Z | 2022-05-06T22:26:38Z |
Update Git.io deprecated link (#46888) | diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 2f65ce17f93b2..1904ce32f3170 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -435,7 +435,7 @@ def _str_rstrip(self, to_strip=None):
def _str_removeprefix(self, prefix: str) -> Series:
# outstanding question on whether to use native methods for users
- # on Python 3.9+ https://git.io/JE9QK, in which case we could do
+ # on Python 3.9+ https://bit.ly/3LuMeRn, in which case we could do
# return self._str_map(str.removeprefix)
def removeprefix(text: str) -> str:
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46899 | 2022-04-29T23:56:21Z | 2022-04-30T00:48:13Z | 2022-04-30T00:48:13Z | 2022-04-30T15:41:44Z |
REF: Use Localizer more | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index c876cc55be0be..e5217259a3648 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -52,12 +52,8 @@ from pandas._libs.tslibs.np_datetime import (
)
from pandas._libs.tslibs.timezones cimport (
- get_dst_info,
get_utcoffset,
- is_fixed_offset,
- is_tzlocal,
is_utc,
- is_zoneinfo,
maybe_get_tz,
tz_compare,
utc_pytz as UTC,
@@ -77,10 +73,7 @@ from pandas._libs.tslibs.nattype cimport (
checknull_with_nat,
)
from pandas._libs.tslibs.tzconversion cimport (
- bisect_right_i8,
- infer_dateutil_fold,
- localize_tzinfo_api,
- tz_convert_from_utc_single,
+ Localizer,
tz_localize_to_utc_single,
)
@@ -518,9 +511,7 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts,
_TSObject obj = _TSObject()
int64_t value # numpy dt64
datetime dt
- ndarray[int64_t] trans
- int64_t* tdata
- int64_t[::1] deltas
+ Py_ssize_t pos
value = dtstruct_to_dt64(&dts)
obj.dts = dts
@@ -530,19 +521,18 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts,
check_overflows(obj)
return obj
+ cdef:
+ Localizer info = Localizer(tz)
+
# Infer fold from offset-adjusted obj.value
# see PEP 495 https://www.python.org/dev/peps/pep-0495/#the-fold-attribute
- if is_utc(tz):
+ if info.use_utc:
pass
- elif is_tzlocal(tz) or is_zoneinfo(tz):
- localize_tzinfo_api(obj.value, tz, &obj.fold)
- else:
- trans, deltas, typ = get_dst_info(tz)
-
- if typ == 'dateutil':
- tdata = <int64_t*>cnp.PyArray_DATA(trans)
- pos = bisect_right_i8(tdata, obj.value, trans.shape[0]) - 1
- obj.fold = infer_dateutil_fold(obj.value, trans, deltas, pos)
+ elif info.use_tzlocal:
+ info.utc_val_to_local_val(obj.value, &pos, &obj.fold)
+ elif info.use_dst and not info.use_pytz:
+ # i.e. dateutil
+ info.utc_val_to_local_val(obj.value, &pos, &obj.fold)
# Keep the converter same as PyDateTime's
dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day,
@@ -700,18 +690,19 @@ cdef inline void _localize_tso(_TSObject obj, tzinfo tz):
cdef:
int64_t local_val
Py_ssize_t outpos = -1
+ Localizer info = Localizer(tz)
assert obj.tzinfo is None
- if is_utc(tz):
+ if info.use_utc:
pass
elif obj.value == NPY_NAT:
pass
else:
- local_val = tz_convert_from_utc_single(obj.value, tz, &obj.fold, &outpos)
+ local_val = info.utc_val_to_local_val(obj.value, &outpos, &obj.fold)
- if outpos != -1:
- # infer we went through a pytz path
+ if info.use_pytz:
+ # infer we went through a pytz path, will have outpos!=-1
tz = tz._tzinfos[tz._transition_info[outpos]]
dt64_to_dtstruct(local_val, &obj.dts)
diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd
index ce7541fe1e74e..a34161b20e2ff 100644
--- a/pandas/_libs/tslibs/tzconversion.pxd
+++ b/pandas/_libs/tslibs/tzconversion.pxd
@@ -2,6 +2,7 @@ from cpython.datetime cimport tzinfo
from numpy cimport (
int64_t,
intp_t,
+ ndarray,
)
@@ -21,5 +22,23 @@ cdef bint infer_dateutil_fold(
int64_t value,
const int64_t[::1] trans,
const int64_t[::1] deltas,
- intp_t pos,
+ Py_ssize_t pos,
)
+
+
+cdef class Localizer:
+ cdef:
+ tzinfo tz
+ bint use_utc, use_fixed, use_tzlocal, use_dst, use_pytz
+ ndarray trans
+ Py_ssize_t ntrans
+ const int64_t[::1] deltas
+ int64_t delta
+ int64_t* tdata
+
+ cdef inline int64_t utc_val_to_local_val(
+ self,
+ int64_t utc_val,
+ Py_ssize_t* pos,
+ bint* fold=?,
+ ) except? -1
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index c6fe40d082038..8d307e324ba4e 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -46,6 +46,75 @@ from pandas._libs.tslibs.timezones cimport (
)
+cdef const int64_t[::1] _deltas_placeholder = np.array([], dtype=np.int64)
+
+
+@cython.freelist(16)
+#@cython.internal
+@cython.final
+cdef class Localizer:
+ # cdef:
+ # tzinfo tz
+ # bint use_utc, use_fixed, use_tzlocal, use_dst, use_pytz
+ # ndarray trans
+ # Py_ssize_t ntrans
+ # const int64_t[::1] deltas
+ # int64_t delta
+ # int64_t* tdata
+
+ @cython.initializedcheck(False)
+ @cython.boundscheck(False)
+ def __cinit__(self, tzinfo tz):
+ self.tz = tz
+ self.use_utc = self.use_tzlocal = self.use_fixed = False
+ self.use_dst = self.use_pytz = False
+ self.ntrans = -1 # placeholder
+ self.delta = -1 # placeholder
+ self.deltas = _deltas_placeholder
+ self.tdata = NULL
+
+ if is_utc(tz) or tz is None:
+ self.use_utc = True
+
+ elif is_tzlocal(tz) or is_zoneinfo(tz):
+ self.use_tzlocal = True
+
+ else:
+ trans, deltas, typ = get_dst_info(tz)
+ self.trans = trans
+ self.ntrans = self.trans.shape[0]
+ self.deltas = deltas
+
+ if typ != "pytz" and typ != "dateutil":
+ # static/fixed; in this case we know that len(delta) == 1
+ self.use_fixed = True
+ self.delta = self.deltas[0]
+ else:
+ self.use_dst = True
+ if typ == "pytz":
+ self.use_pytz = True
+ self.tdata = <int64_t*>cnp.PyArray_DATA(self.trans)
+
+ @cython.boundscheck(False)
+ cdef inline int64_t utc_val_to_local_val(
+ self, int64_t utc_val, Py_ssize_t* pos, bint* fold=NULL
+ ) except? -1:
+ if self.use_utc:
+ return utc_val
+ elif self.use_tzlocal:
+ return utc_val + localize_tzinfo_api(utc_val, self.tz, fold)
+ elif self.use_fixed:
+ return utc_val + self.delta
+ else:
+ pos[0] = bisect_right_i8(self.tdata, utc_val, self.ntrans) - 1
+ if fold is not NULL:
+ fold[0] = infer_dateutil_fold(
+ utc_val, self.trans, self.deltas, pos[0]
+ )
+
+ return utc_val + self.deltas[pos[0]]
+
+
cdef int64_t tz_localize_to_utc_single(
int64_t val, tzinfo tz, object ambiguous=None, object nonexistent=None,
) except? -1:
@@ -465,44 +534,16 @@ cdef int64_t tz_convert_from_utc_single(
converted: int64
"""
cdef:
- int64_t delta
- int64_t[::1] deltas
- ndarray[int64_t, ndim=1] trans
- int64_t* tdata
- intp_t pos
+ Localizer info = Localizer(tz)
+ Py_ssize_t pos
if utc_val == NPY_NAT:
return utc_val
- if is_utc(tz):
- return utc_val
- elif is_tzlocal(tz) or is_zoneinfo(tz):
- return utc_val + _tz_localize_using_tzinfo_api(utc_val, tz, to_utc=False, fold=fold)
+ if outpos is not NULL and info.use_pytz:
+ return info.utc_val_to_local_val(utc_val, outpos, fold)
else:
- trans, deltas, typ = get_dst_info(tz)
- tdata = <int64_t*>cnp.PyArray_DATA(trans)
-
- if typ == "dateutil":
- pos = bisect_right_i8(tdata, utc_val, trans.shape[0]) - 1
-
- if fold is not NULL:
- fold[0] = infer_dateutil_fold(utc_val, trans, deltas, pos)
- return utc_val + deltas[pos]
-
- elif typ == "pytz":
- pos = bisect_right_i8(tdata, utc_val, trans.shape[0]) - 1
-
- # We need to get 'pos' back to the caller so it can pick the
- # correct "standardized" tzinfo object.
- if outpos is not NULL:
- outpos[0] = pos
- return utc_val + deltas[pos]
-
- else:
- # All other cases have len(deltas) == 1. As of 2018-07-17
- # (and 2022-03-07), all test cases that get here have
- # is_fixed_offset(tz).
- return utc_val + deltas[0]
+ return info.utc_val_to_local_val(utc_val, &pos, fold)
# OSError may be thrown by tzlocal on windows at or close to 1970-01-01
@@ -571,7 +612,7 @@ cdef bint infer_dateutil_fold(
int64_t value,
const int64_t[::1] trans,
const int64_t[::1] deltas,
- intp_t pos,
+ Py_ssize_t pos,
):
"""
Infer _TSObject fold property from value by assuming 0 and then setting
@@ -584,7 +625,7 @@ cdef bint infer_dateutil_fold(
ndarray of offset transition points in nanoseconds since epoch.
deltas : int64_t[:]
array of offsets corresponding to transition points in trans.
- pos : intp_t
+ pos : Py_ssize_t
Position of the last transition point before taking fold into account.
Returns
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index 6b78100705a93..fea357c9da98b 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -32,78 +32,8 @@ from .np_datetime cimport (
from .offsets cimport BaseOffset
from .period cimport get_period_ordinal
from .timestamps cimport create_timestamp_from_ts
-from .timezones cimport (
- get_dst_info,
- is_tzlocal,
- is_utc,
- is_zoneinfo,
-)
-from .tzconversion cimport (
- bisect_right_i8,
- localize_tzinfo_api,
-)
-
-
-cdef const int64_t[::1] _deltas_placeholder = np.array([], dtype=np.int64)
-
-
-@cython.freelist(16)
-@cython.internal
-@cython.final
-cdef class Localizer:
- cdef:
- tzinfo tz
- bint use_utc, use_fixed, use_tzlocal, use_dst, use_pytz
- ndarray trans
- Py_ssize_t ntrans
- const int64_t[::1] deltas
- int64_t delta
- int64_t* tdata
-
- @cython.initializedcheck(False)
- @cython.boundscheck(False)
- def __cinit__(self, tzinfo tz):
- self.tz = tz
- self.use_utc = self.use_tzlocal = self.use_fixed = False
- self.use_dst = self.use_pytz = False
- self.ntrans = -1 # placeholder
- self.delta = -1 # placeholder
- self.deltas = _deltas_placeholder
- self.tdata = NULL
-
- if is_utc(tz) or tz is None:
- self.use_utc = True
-
- elif is_tzlocal(tz) or is_zoneinfo(tz):
- self.use_tzlocal = True
-
- else:
- trans, deltas, typ = get_dst_info(tz)
- self.trans = trans
- self.ntrans = self.trans.shape[0]
- self.deltas = deltas
-
- if typ != "pytz" and typ != "dateutil":
- # static/fixed; in this case we know that len(delta) == 1
- self.use_fixed = True
- self.delta = self.deltas[0]
- else:
- self.use_dst = True
- if typ == "pytz":
- self.use_pytz = True
- self.tdata = <int64_t*>cnp.PyArray_DATA(self.trans)
-
- @cython.boundscheck(False)
- cdef inline int64_t utc_val_to_local_val(self, int64_t utc_val, Py_ssize_t* pos) except? -1:
- if self.use_utc:
- return utc_val
- elif self.use_tzlocal:
- return utc_val + localize_tzinfo_api(utc_val, self.tz)
- elif self.use_fixed:
- return utc_val + self.delta
- else:
- pos[0] = bisect_right_i8(self.tdata, utc_val, self.ntrans) - 1
- return utc_val + self.deltas[pos[0]]
+from .timezones cimport is_utc
+from .tzconversion cimport Localizer
@cython.boundscheck(False)
@@ -140,15 +70,7 @@ def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
result[i] = NPY_NAT
continue
- if info.use_utc:
- local_val = utc_val
- elif info.use_tzlocal:
- local_val = utc_val + localize_tzinfo_api(utc_val, tz)
- elif info.use_fixed:
- local_val = utc_val + info.delta
- else:
- pos = bisect_right_i8(info.tdata, utc_val, info.ntrans) - 1
- local_val = utc_val + info.deltas[pos]
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
result[i] = local_val
| I think this gets us to maximal feasible sharing for utc_val_to_local_val. | https://api.github.com/repos/pandas-dev/pandas/pulls/46898 | 2022-04-29T22:58:27Z | 2022-04-30T03:14:03Z | 2022-04-30T03:14:03Z | 2022-04-30T03:24:42Z |
Styler whatsew | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index c85a087835b80..9f8ab3118751d 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -37,10 +37,17 @@ The protocol consists of two parts:
Styler
^^^^^^
- - New method :meth:`.Styler.to_string` for alternative customisable output methods (:issue:`44502`)
- - Added the ability to render ``border`` and ``border-{side}`` CSS properties in Excel (:issue:`42276`)
- - Added a new method :meth:`.Styler.concat` which allows adding customised footer rows to visualise additional calculations on the data, e.g. totals and counts etc. (:issue:`43875`, :issue:`46186`)
- - :meth:`.Styler.highlight_null` now accepts ``color`` consistently with other builtin methods and deprecates ``null_color`` although this remains backwards compatible (:issue:`45907`)
+The most notable development is the new method :meth:`.Styler.concat` which
+allows adding customised footer rows to visualise additional calculations on the data,
+e.g. totals and counts etc. (:issue:`43875`, :issue:`46186`)
+
+Additionally there is an alternative output method :meth:`.Styler.to_string`,
+which allows using the Styler's formatting methods to create, for example, CSVs (:issue:`44502`).
+
+Minor feature improvements are:
+
+ - Adding the ability to render ``border`` and ``border-{side}`` CSS properties in Excel (:issue:`42276`)
+ - Making keyword arguments consist: :meth:`.Styler.highlight_null` now accepts ``color`` and deprecates ``null_color`` although this remains backwards compatible (:issue:`45907`)
.. _whatsnew_150.enhancements.resample_group_keys:
@@ -127,13 +134,6 @@ Notable bug fixes
These are bug fixes that might have notable behavior changes.
-.. _whatsnew_150.notable_bug_fixes.notable_bug_fix1:
-
-Styler
-^^^^^^
-
-- Fixed bug in :class:`CSSToExcelConverter` leading to ``TypeError`` when border color provided without border style for ``xlsxwriter`` engine (:issue:`42276`)
-
.. _whatsnew_150.notable_bug_fixes.groupby_transform_dropna:
Using ``dropna=True`` with ``groupby`` transforms
@@ -194,13 +194,6 @@ did not have the same index as the input.
df.groupby('a', dropna=True).transform('ffill')
df.groupby('a', dropna=True).transform(lambda x: x)
-.. _whatsnew_150.notable_bug_fixes.visualization:
-
-Styler
-^^^^^^
-
-- Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`)
-
.. _whatsnew_150.notable_bug_fixes.notable_bug_fix2:
notable_bug_fix2
@@ -615,6 +608,7 @@ Plotting
- Bug in :meth:`DataFrame.boxplot` that prevented specifying ``vert=False`` (:issue:`36918`)
- Bug in :meth:`DataFrame.plot.scatter` that prevented specifying ``norm`` (:issue:`45809`)
- The function :meth:`DataFrame.plot.scatter` now accepts ``color`` as an alias for ``c`` and ``size`` as an alias for ``s`` for consistency to other plotting functions (:issue:`44670`)
+- Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -659,7 +653,7 @@ ExtensionArray
Styler
^^^^^^
- Bug when attempting to apply styling functions to an empty DataFrame subset (:issue:`45313`)
--
+- Bug in :class:`CSSToExcelConverter` leading to ``TypeError`` when border color provided without border style for ``xlsxwriter`` engine (:issue:`42276`)
Metadata
^^^^^^^^
| editing the whatsnew placeholder ahead of 1.5, in case I don't have time later on
| https://api.github.com/repos/pandas-dev/pandas/pulls/46897 | 2022-04-29T19:51:48Z | 2022-04-29T22:42:07Z | 2022-04-29T22:42:07Z | 2022-04-30T14:49:25Z |
PERF: Optimize read_excel nrows | diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index 3363b43f29b78..a2d989e787e0f 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -86,4 +86,15 @@ def time_read_excel(self, engine):
read_excel(fname, engine=engine)
+class ReadExcelNRows(ReadExcel):
+ def time_read_excel(self, engine):
+ if engine == "xlrd":
+ fname = self.fname_excel_xls
+ elif engine == "odf":
+ fname = self.fname_odf
+ else:
+ fname = self.fname_excel
+ read_excel(fname, engine=engine, nrows=10)
+
+
from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 2efc6c9167a83..3bb9a72d6e2f9 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -573,6 +573,7 @@ Performance improvements
- Performance improvement when setting values in a pyarrow backed string array (:issue:`46400`)
- Performance improvement in :func:`factorize` (:issue:`46109`)
- Performance improvement in :class:`DataFrame` and :class:`Series` constructors for extension dtype scalars (:issue:`45854`)
+- Performance improvement in :func:`read_excel` when ``nrows`` argument provided (:issue:`32727`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.bug_fixes:
diff --git a/pandas/io/common.py b/pandas/io/common.py
index fdee1600c2a32..98964b100966f 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -28,6 +28,7 @@
Generic,
Literal,
Mapping,
+ Sequence,
TypeVar,
cast,
overload,
@@ -56,7 +57,12 @@
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
-from pandas.core.dtypes.common import is_file_like
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_file_like,
+ is_integer,
+ is_list_like,
+)
from pandas.core.shared_docs import _shared_docs
@@ -177,12 +183,32 @@ def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
def validate_header_arg(header: object) -> None:
- if isinstance(header, bool):
+ if header is None:
+ return
+ if is_integer(header):
+ header = cast(int, header)
+ if header < 0:
+ # GH 27779
+ raise ValueError(
+ "Passing negative integer to header is invalid. "
+ "For no header, use header=None instead"
+ )
+ return
+ if is_list_like(header, allow_sets=False):
+ header = cast(Sequence, header)
+ if not all(map(is_integer, header)):
+ raise ValueError("header must be integer or list of integers")
+ if any(i < 0 for i in header):
+ raise ValueError("cannot specify multi-index header with negative integers")
+ return
+ if is_bool(header):
raise TypeError(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
+ # GH 16338
+ raise ValueError("header must be integer or list of integers")
@overload
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 030ae9fefda98..d20f347e54d6b 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -2,6 +2,7 @@
import abc
import datetime
+from functools import partial
from io import BytesIO
import os
from textwrap import fill
@@ -70,6 +71,7 @@
pop_header_name,
)
from pandas.io.parsers import TextParser
+from pandas.io.parsers.readers import validate_integer
_read_excel_doc = (
"""
@@ -563,7 +565,7 @@ def get_sheet_by_index(self, index: int):
pass
@abc.abstractmethod
- def get_sheet_data(self, sheet, convert_float: bool):
+ def get_sheet_data(self, sheet, convert_float: bool, rows: int | None = None):
pass
def raise_if_bad_sheet_by_index(self, index: int) -> None:
@@ -577,6 +579,99 @@ def raise_if_bad_sheet_by_name(self, name: str) -> None:
if name not in self.sheet_names:
raise ValueError(f"Worksheet named '{name}' not found")
+ def _check_skiprows_func(
+ self,
+ skiprows: Callable,
+ rows_to_use: int,
+ ) -> int:
+ """
+ Determine how many file rows are required to obtain `nrows` data
+ rows when `skiprows` is a function.
+
+ Parameters
+ ----------
+ skiprows : function
+ The function passed to read_excel by the user.
+ rows_to_use : int
+ The number of rows that will be needed for the header and
+ the data.
+
+ Returns
+ -------
+ int
+ """
+ i = 0
+ rows_used_so_far = 0
+ while rows_used_so_far < rows_to_use:
+ if not skiprows(i):
+ rows_used_so_far += 1
+ i += 1
+ return i
+
+ def _calc_rows(
+ self,
+ header: int | Sequence[int] | None,
+ index_col: int | Sequence[int] | None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None,
+ nrows: int | None,
+ ) -> int | None:
+ """
+ If nrows specified, find the number of rows needed from the
+ file, otherwise return None.
+
+
+ Parameters
+ ----------
+ header : int, list of int, or None
+ See read_excel docstring.
+ index_col : int, list of int, or None
+ See read_excel docstring.
+ skiprows : list-like, int, callable, or None
+ See read_excel docstring.
+ nrows : int or None
+ See read_excel docstring.
+
+ Returns
+ -------
+ int or None
+ """
+ if nrows is None:
+ return None
+ if header is None:
+ header_rows = 1
+ elif is_integer(header):
+ header = cast(int, header)
+ header_rows = 1 + header
+ else:
+ header = cast(Sequence, header)
+ header_rows = 1 + header[-1]
+ # If there is a MultiIndex header and an index then there is also
+ # a row containing just the index name(s)
+ if is_list_like(header) and index_col is not None:
+ header = cast(Sequence, header)
+ if len(header) > 1:
+ header_rows += 1
+ if skiprows is None:
+ return header_rows + nrows
+ if is_integer(skiprows):
+ skiprows = cast(int, skiprows)
+ return header_rows + nrows + skiprows
+ if is_list_like(skiprows):
+
+ def f(skiprows: Sequence, x: int) -> bool:
+ return x in skiprows
+
+ skiprows = cast(Sequence, skiprows)
+ return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
+ if callable(skiprows):
+ return self._check_skiprows_func(
+ skiprows,
+ header_rows + nrows,
+ )
+ # else unexpected skiprows type: read_excel will not optimize
+ # the number of rows read from file
+ return None
+
def parse(
self,
sheet_name: str | int | list[int] | list[str] | None = 0,
@@ -613,6 +708,7 @@ def parse(
)
validate_header_arg(header)
+ validate_integer("nrows", nrows)
ret_dict = False
@@ -643,7 +739,8 @@ def parse(
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
- data = self.get_sheet_data(sheet, convert_float)
+ file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
+ data = self.get_sheet_data(sheet, convert_float, file_rows_needed)
if hasattr(sheet, "close"):
# pyxlsb opens two TemporaryFiles
sheet.close()
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 5a7e5b0d8d325..075590f3535fe 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -90,7 +90,7 @@ def get_sheet_by_name(self, name: str):
raise ValueError(f"sheet {name} not found")
def get_sheet_data(
- self, sheet, convert_float: bool
+ self, sheet, convert_float: bool, file_rows_needed: int | None = None
) -> list[list[Scalar | NaTType]]:
"""
Parse an ODF Table into a list of lists
@@ -148,6 +148,8 @@ def get_sheet_data(
empty_rows = 0
for _ in range(row_repeat):
table.append(table_row)
+ if file_rows_needed is not None and len(table) >= file_rows_needed:
+ break
# Make our table square
for row in table:
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 6d70b3f319f37..8f4201d0befff 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -588,7 +588,9 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.value
- def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
+ def get_sheet_data(
+ self, sheet, convert_float: bool, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
if self.book.read_only:
sheet.reset_dimensions()
@@ -603,6 +605,8 @@ def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
if converted_row:
last_row_with_data = row_number
data.append(converted_row)
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
# Trim trailing empty rows
data = data[: last_row_with_data + 1]
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index 36e2645560078..5d40ccdf2f8f3 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -79,7 +79,12 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.v
- def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ file_rows_needed: int | None = None,
+ ) -> list[list[Scalar]]:
data: list[list[Scalar]] = []
prevous_row_number = -1
# When sparse=True the rows can have different lengths and empty rows are
@@ -94,6 +99,8 @@ def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
data.extend([[]] * (row_number - prevous_row_number - 1))
data.append(converted_row)
prevous_row_number = row_number
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
if data:
# extend rows to max_width
max_width = max(len(data_row) for data_row in data)
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index f38a05e7a4e64..0bf3ac6134cf6 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -1,8 +1,13 @@
+from __future__ import annotations
+
from datetime import time
import numpy as np
-from pandas._typing import StorageOptions
+from pandas._typing import (
+ Scalar,
+ StorageOptions,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc
@@ -56,7 +61,9 @@ def get_sheet_by_index(self, index):
self.raise_if_bad_sheet_by_index(index)
return self.book.sheet_by_index(index)
- def get_sheet_data(self, sheet, convert_float):
+ def get_sheet_data(
+ self, sheet, convert_float: bool, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
from xlrd import (
XL_CELL_BOOLEAN,
XL_CELL_DATE,
@@ -107,7 +114,10 @@ def _parse_cell(cell_contents, cell_typ):
data = []
- for i in range(sheet.nrows):
+ nrows = sheet.nrows
+ if file_rows_needed is not None:
+ nrows = min(nrows, file_rows_needed)
+ for i in range(nrows):
row = [
_parse_cell(value, typ)
for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 2851ea36c8a33..e9c39d5ff1996 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -120,13 +120,7 @@ def __init__(self, kwds) -> None:
# validate header options for mi
self.header = kwds.get("header")
- if isinstance(self.header, (list, tuple, np.ndarray)):
- if not all(map(is_integer, self.header)):
- raise ValueError("header must be integer or list of integers")
- if any(i < 0 for i in self.header):
- raise ValueError(
- "cannot specify multi-index header with negative integers"
- )
+ if is_list_like(self.header, allow_sets=False):
if kwds.get("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header"
@@ -138,9 +132,8 @@ def __init__(self, kwds) -> None:
# validate index_col that only contains integers
if self.index_col is not None:
- is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
if not (
- is_sequence
+ is_list_like(self.index_col, allow_sets=False)
and all(map(is_integer, self.index_col))
or is_integer(self.index_col)
):
@@ -148,21 +141,11 @@ def __init__(self, kwds) -> None:
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
- elif self.header is not None:
+ elif self.header is not None and self.prefix is not None:
# GH 27394
- if self.prefix is not None:
- raise ValueError(
- "Argument prefix must be None if argument header is not None"
- )
- # GH 16338
- elif not is_integer(self.header):
- raise ValueError("header must be integer or list of integers")
- # GH 27779
- elif self.header < 0:
- raise ValueError(
- "Passing negative integer to header is invalid. "
- "For no header, use header=None instead"
- )
+ raise ValueError(
+ "Argument prefix must be None if argument header is not None"
+ )
self._name_processed = False
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 1e0f74ea41453..c58896e9e1baf 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1219,6 +1219,42 @@ def test_read_excel_nrows_non_integer_parameter(self, read_ext):
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, nrows="5")
+ @pytest.mark.parametrize(
+ "filename,sheet_name,header,index_col,skiprows",
+ [
+ ("testmultiindex", "mi_column", [0, 1], 0, None),
+ ("testmultiindex", "mi_index", None, [0, 1], None),
+ ("testmultiindex", "both", [0, 1], [0, 1], None),
+ ("testmultiindex", "mi_column_name", [0, 1], 0, None),
+ ("testskiprows", "skiprows_list", None, None, [0, 2]),
+ ("testskiprows", "skiprows_list", None, None, lambda x: x == 0 or x == 2),
+ ],
+ )
+ def test_read_excel_nrows_params(
+ self, read_ext, filename, sheet_name, header, index_col, skiprows
+ ):
+ """
+ For various parameters, we should get the same result whether we
+ limit the rows during load (nrows=3) or after (df.iloc[:3]).
+ """
+ # GH 46894
+ expected = pd.read_excel(
+ filename + read_ext,
+ sheet_name=sheet_name,
+ header=header,
+ index_col=index_col,
+ skiprows=skiprows,
+ ).iloc[:3]
+ actual = pd.read_excel(
+ filename + read_ext,
+ sheet_name=sheet_name,
+ header=header,
+ index_col=index_col,
+ skiprows=skiprows,
+ nrows=3,
+ )
+ tm.assert_frame_equal(actual, expected)
+
def test_read_excel_squeeze(self, read_ext):
# GH 12157
f = "test_squeeze" + read_ext
| - [x] closes #32727
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I timed read_excel on a file with 10 columns and 1000 rows, and report the best of 10 repeats (ms) below. This change can make a modest improvement on xls and ods files, and a significant improvement on xlsx and xlsb files. When `nrows=None` this has no measurable impact on the run time.
| ext | nrows | time (main) | time (this branch) |
|------|-------|------------|------------|
| xls | None | 22.4 | 22.1 |
| xls | 10 | 21.4 | 17.0 |
| xlsx | None | 99.1 | 99.7 |
| xlsx | 10 | 98.0 | 8.8 |
| xlsb | None | 81.0 | 80.2 |
| xlsb | 10 | 80.2 | 4.8 |
| ods | None | 571 | 569 |
| ods | 10 | 566 | 517 |
Here are the results of ```asv run -e -E existing --bench ReadExcel``` showing similar results (the benchmark spreadsheet is different than the one above).
```
[75.00%] ··· io.excel.ReadExcel.time_read_excel ok
[75.00%] ··· ========== ============
engine
---------- ------------
xlrd 36.5±0.3ms
openpyxl 162±0.1ms
odf 688±5ms
========== ============
[100.00%] ··· io.excel.ReadExcelNRows.time_read_excel ok
[100.00%] ··· ========== ============
engine
---------- ------------
xlrd 24.5±0.1ms
openpyxl 29.0±0.1ms
odf 508±3ms
========== ============
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/46894 | 2022-04-29T05:06:53Z | 2022-06-05T23:49:12Z | 2022-06-05T23:49:12Z | 2022-06-07T03:22:23Z |
BUG: rolling aggregate() with a list of functions along axis 1 raises ValueError #46132 | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 6bf6fd65f5633..0f361ca9a74a9 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -862,6 +862,7 @@ Groupby/resample/rolling
- Bug in :meth:`Rolling.skew` and :meth:`Rolling.kurt` would give NaN with window of same values (:issue:`30993`)
- Bug in :meth:`.Rolling.var` would segfault calculating weighted variance when window size was larger than data size (:issue:`46760`)
- Bug in :meth:`Grouper.__repr__` where ``dropna`` was not included. Now it is (:issue:`46754`)
+- Bug in :meth:`Rolling.aggregate` raise ValueError when ``func`` was a list of functions and ``axis=1`` (:issue:`46132`)
- Bug in :meth:`DataFrame.rolling` gives ValueError when center=True, axis=1 and win_type is specified (:issue:`46135`)
- Bug in :meth:`.DataFrameGroupBy.describe` and :meth:`.SeriesGroupBy.describe` produces inconsistent results for empty datasets (:issue:`41575`)
- Bug in :meth:`DataFrame.resample` reduction methods when used with ``on`` would attempt to aggregate the provided column (:issue:`47079`)
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 9e8f95cf340c4..42343ad0c42c0 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -656,9 +656,28 @@ def _numba_apply(
return self._resolve_output(out, obj)
def aggregate(self, func, *args, **kwargs):
- result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
+ # GH46132
+ # modifying axis and transposing dataframe should not be needed
+ # once ReamplerWindow supports axis = 1
+
+ obj = self.obj
+ axis = self.axis
+
+ self.obj = self.obj.T if self.axis == 1 else self.obj
+ self.axis = 0
+
+ try:
+ result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
+ finally:
+ self.obj = obj
+ self.axis = axis
+
+ if axis == 1:
+ result = result.T if result is not None else result
+
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
+
return result
agg = aggregate
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 4c26cfb95fd85..35bd4853df515 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1851,6 +1851,22 @@ def test_rolling_var_same_value_count_logic(values, window, min_periods, expecte
tm.assert_series_equal(expected == 0, result_std == 0)
+def test_rolling_agg_on_columns():
+ # GH 46132
+
+ df = DataFrame({"a": [1, 3], "b": [2, 4]})
+ res = df.rolling(window=2, axis=1, min_periods=1).aggregate([np.sum, np.mean])
+ expected_val = np.array([[1, 3.0], [1, 1.5], [3, 7], [3, 3.5]])
+
+ expected_index = MultiIndex.from_tuples(
+ [(0, "sum"), (0, "mean"), (1, "sum"), (1, "mean")]
+ )
+
+ expected_frame = DataFrame(expected_val, index=expected_index, columns=["a", "b"])
+
+ tm.assert_frame_equal(expected_frame, res)
+
+
def test_rolling_mean_sum_floating_artifacts():
# GH 42064.
@@ -1862,6 +1878,24 @@ def test_rolling_mean_sum_floating_artifacts():
assert (result[-3:] == 0).all()
+def test_rolling_agg_when_agg_fail():
+ # GH 46132
+ df = DataFrame({"a": [1, 3], "b": [2, 4]})
+ win = df.rolling(window=2, axis=1, min_periods=1)
+ try:
+ win.aggregate([np.log, np.sqrt])
+ except ValueError:
+ pass
+ tm.assert_frame_equal(win.obj, df)
+ # make sure if aggregate fails the attribute of Rolling/Window will not be changed
+ assert win.axis == 1
+ # make sure the attribute of Rolling/Window will not be changed
+ # when aggregate runs successfully
+ win.aggregate([np.mean, np.sum])
+ tm.assert_frame_equal(win.obj, df)
+ assert win.axis == 1
+
+
def test_rolling_skew_kurt_floating_artifacts():
# GH 42064 46431
| - [x] closes #46132 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46892 | 2022-04-28T21:11:41Z | 2022-06-06T03:04:59Z | null | 2022-06-06T03:05:17Z |
REF: handle 2D in tslibs.vectorized | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 8f145d0d66acc..4eb1494c4d56c 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -354,6 +354,7 @@ def array_to_timedelta64(
raise ValueError(
"unit must not be specified if the input contains a str"
)
+ cnp.PyArray_ITER_NEXT(it)
# Usually, we have all strings. If so, we hit the fast path.
# If this path fails, we try conversion a different way, and
diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi
index a8f81514c5645..61148605aadea 100644
--- a/pandas/_libs/tslibs/vectorized.pyi
+++ b/pandas/_libs/tslibs/vectorized.pyi
@@ -11,24 +11,24 @@ from pandas._libs.tslibs.offsets import BaseOffset
from pandas._typing import npt
def dt64arr_to_periodarr(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
freq: int,
tz: tzinfo | None,
-) -> npt.NDArray[np.int64]: ... # np.ndarray[np.int64, ndim=1]
+) -> npt.NDArray[np.int64]: ...
def is_date_array_normalized(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
) -> bool: ...
def normalize_i8_timestamps(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
tz: tzinfo | None,
) -> npt.NDArray[np.int64]: ...
def get_resolution(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
) -> Resolution: ...
def ints_to_pydatetime(
- arr: npt.NDArray[np.int64], # const int64_t[:}]
+ arr: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
freq: BaseOffset | None = ...,
fold: bool = ...,
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index fea357c9da98b..31d0579900abd 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -38,7 +38,8 @@ from .tzconversion cimport Localizer
@cython.boundscheck(False)
@cython.wraparound(False)
-def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
+def tz_convert_from_utc(ndarray stamps, tzinfo tz):
+ # stamps is int64_t, arbitrary ndim
"""
Convert the values (in i8) from UTC to tz
@@ -54,27 +55,33 @@ def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t pos, i, n = stamps.shape[0]
+ Py_ssize_t pos, i, n = stamps.size
- int64_t[::1] result
+ ndarray result
+ cnp.broadcast mi
if tz is None or is_utc(tz) or stamps.size == 0:
# Much faster than going through the "standard" pattern below
- return stamps.base.copy()
+ return stamps.copy()
- result = np.empty(n, dtype=np.int64)
+ result = cnp.PyArray_EMPTY(stamps.ndim, stamps.shape, cnp.NPY_INT64, 0)
+ mi = cnp.PyArray_MultiIterNew2(result, stamps)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
+
if utc_val == NPY_NAT:
- result[i] = NPY_NAT
- continue
+ local_val = NPY_NAT
+ else:
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
- local_val = info.utc_val_to_local_val(utc_val, &pos)
+ # Analogous to: result[i] = local_val
+ (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = local_val
- result[i] = local_val
+ cnp.PyArray_MultiIter_NEXT(mi)
- return result.base
+ return result
# -------------------------------------------------------------------------
@@ -83,12 +90,13 @@ def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
@cython.wraparound(False)
@cython.boundscheck(False)
def ints_to_pydatetime(
- const int64_t[:] stamps,
+ ndarray stamps,
tzinfo tz=None,
BaseOffset freq=None,
bint fold=False,
str box="datetime"
) -> np.ndarray:
+ # stamps is int64, arbitrary ndim
"""
Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp.
@@ -119,13 +127,21 @@ def ints_to_pydatetime(
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
npy_datetimestruct dts
tzinfo new_tz
- ndarray[object] result = np.empty(n, dtype=object)
bint use_date = False, use_time = False, use_ts = False, use_pydt = False
+ object res_val
+
+ # Note that `result` (and thus `result_flat`) is C-order and
+ # `it` iterates C-order as well, so the iteration matches
+ # See discussion at
+ # github.com/pandas-dev/pandas/pull/46886#discussion_r860261305
+ ndarray result = cnp.PyArray_EMPTY(stamps.ndim, stamps.shape, cnp.NPY_OBJECT, 0)
+ object[::1] res_flat = result.ravel() # should NOT be a copy
+ cnp.flatiter it = cnp.PyArray_IterNew(stamps)
if box == "date":
assert (tz is None), "tz should be None when converting to date"
@@ -142,31 +158,44 @@ def ints_to_pydatetime(
)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = (<int64_t*>cnp.PyArray_ITER_DATA(it))[0]
+
new_tz = tz
if utc_val == NPY_NAT:
- result[i] = <object>NaT
- continue
+ res_val = <object>NaT
- local_val = info.utc_val_to_local_val(utc_val, &pos)
- if info.use_pytz:
- # find right representation of dst etc in pytz timezone
- new_tz = tz._tzinfos[tz._transition_info[pos]]
-
- dt64_to_dtstruct(local_val, &dts)
-
- if use_ts:
- result[i] = create_timestamp_from_ts(utc_val, dts, new_tz, freq, fold)
- elif use_pydt:
- result[i] = datetime(
- dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us,
- new_tz, fold=fold,
- )
- elif use_date:
- result[i] = date(dts.year, dts.month, dts.day)
else:
- result[i] = time(dts.hour, dts.min, dts.sec, dts.us, new_tz, fold=fold)
+
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
+ if info.use_pytz:
+ # find right representation of dst etc in pytz timezone
+ new_tz = tz._tzinfos[tz._transition_info[pos]]
+
+ dt64_to_dtstruct(local_val, &dts)
+
+ if use_ts:
+ res_val = create_timestamp_from_ts(utc_val, dts, new_tz, freq, fold)
+ elif use_pydt:
+ res_val = datetime(
+ dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us,
+ new_tz, fold=fold,
+ )
+ elif use_date:
+ res_val = date(dts.year, dts.month, dts.day)
+ else:
+ res_val = time(dts.hour, dts.min, dts.sec, dts.us, new_tz, fold=fold)
+
+ # Note: we can index result directly instead of using PyArray_MultiIter_DATA
+ # like we do for the other functions because result is known C-contiguous
+ # and is the first argument to PyArray_MultiIterNew2. The usual pattern
+ # does not seem to work with object dtype.
+ # See discussion at
+ # github.com/pandas-dev/pandas/pull/46886#discussion_r860261305
+ res_flat[i] = res_val
+
+ cnp.PyArray_ITER_NEXT(it)
return result
@@ -190,27 +219,33 @@ cdef inline c_Resolution _reso_stamp(npy_datetimestruct *dts):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_resolution(const int64_t[:] stamps, tzinfo tz=None) -> Resolution:
+def get_resolution(ndarray stamps, tzinfo tz=None) -> Resolution:
+ # stamps is int64_t, any ndim
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
+ cnp.flatiter it = cnp.PyArray_IterNew(stamps)
npy_datetimestruct dts
c_Resolution reso = c_Resolution.RESO_DAY, curr_reso
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = cnp.PyArray_GETITEM(stamps, cnp.PyArray_ITER_DATA(it))
+
if utc_val == NPY_NAT:
- continue
+ pass
+ else:
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
- local_val = info.utc_val_to_local_val(utc_val, &pos)
+ dt64_to_dtstruct(local_val, &dts)
+ curr_reso = _reso_stamp(&dts)
+ if curr_reso < reso:
+ reso = curr_reso
- dt64_to_dtstruct(local_val, &dts)
- curr_reso = _reso_stamp(&dts)
- if curr_reso < reso:
- reso = curr_reso
+ cnp.PyArray_ITER_NEXT(it)
return Resolution(reso)
@@ -221,7 +256,8 @@ def get_resolution(const int64_t[:] stamps, tzinfo tz=None) -> Resolution:
@cython.cdivision(False)
@cython.wraparound(False)
@cython.boundscheck(False)
-cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz):
+cpdef ndarray normalize_i8_timestamps(ndarray stamps, tzinfo tz):
+ # stamps is int64_t, arbitrary ndim
"""
Normalize each of the (nanosecond) timezone aware timestamps in the given
array by rounding down to the beginning of the day (i.e. midnight).
@@ -238,28 +274,35 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t
"""
cdef:
Localizer info = Localizer(tz)
- int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ int64_t utc_val, local_val, res_val
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
- int64_t[::1] result = np.empty(n, dtype=np.int64)
+ ndarray result = cnp.PyArray_EMPTY(stamps.ndim, stamps.shape, cnp.NPY_INT64, 0)
+ cnp.broadcast mi = cnp.PyArray_MultiIterNew2(result, stamps)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
+
if utc_val == NPY_NAT:
- result[i] = NPY_NAT
- continue
+ res_val = NPY_NAT
+ else:
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
+ res_val = local_val - (local_val % DAY_NANOS)
- local_val = info.utc_val_to_local_val(utc_val, &pos)
+ # Analogous to: result[i] = res_val
+ (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_val
- result[i] = local_val - (local_val % DAY_NANOS)
+ cnp.PyArray_MultiIter_NEXT(mi)
- return result.base # `.base` to access underlying ndarray
+ return result
@cython.wraparound(False)
@cython.boundscheck(False)
-def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None) -> bool:
+def is_date_array_normalized(ndarray stamps, tzinfo tz=None) -> bool:
+ # stamps is int64_t, arbitrary ndim
"""
Check if all of the given (nanosecond) timestamps are normalized to
midnight, i.e. hour == minute == second == 0. If the optional timezone
@@ -277,16 +320,21 @@ def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None) -> bool:
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
+ cnp.flatiter it = cnp.PyArray_IterNew(stamps)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = cnp.PyArray_GETITEM(stamps, cnp.PyArray_ITER_DATA(it))
+
local_val = info.utc_val_to_local_val(utc_val, &pos)
if local_val % DAY_NANOS != 0:
return False
+ cnp.PyArray_ITER_NEXT(it)
+
return True
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46886 | 2022-04-27T21:31:15Z | 2022-05-06T21:30:54Z | 2022-05-06T21:30:54Z | 2022-05-06T22:25:47Z |
DOC: GH27557 Updates Python support documentation to match NumPy NEP 29 | diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst
index f8e6bda2085d8..d75262c08dfd6 100644
--- a/doc/source/development/policies.rst
+++ b/doc/source/development/policies.rst
@@ -51,7 +51,7 @@ pandas may change the behavior of experimental features at any time.
Python support
~~~~~~~~~~~~~~
-pandas will only drop support for specific Python versions (e.g. 3.6.x, 3.7.x) in
-pandas **major** or **minor** releases.
+pandas mirrors the `NumPy guidelines for Python support <https://numpy.org/neps/nep-0029-deprecation_policy.html#implementation>`__.
+
.. _SemVer: https://semver.org
| - [x] closes [#27557](https://github.com/pandas-dev/pandas/issues/27557)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This updates the documentation for Python support to match NEP 29, and includes a link to NEP 29. The change is at line 410 of docs/development/policies.html. | https://api.github.com/repos/pandas-dev/pandas/pulls/46883 | 2022-04-27T07:02:56Z | 2022-04-28T13:37:33Z | 2022-04-28T13:37:33Z | 2022-04-28T13:37:34Z |
Backport PR #45247 on branch 1.4.x (PERF: find_stack_level) | diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index 806e2abe83a92..ef467f096e963 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -29,17 +29,20 @@ def find_stack_level() -> int:
Find the first place in the stack that is not inside pandas
(tests notwithstanding).
"""
- stack = inspect.stack()
import pandas as pd
pkg_dir = os.path.dirname(pd.__file__)
test_dir = os.path.join(pkg_dir, "tests")
- for n in range(len(stack)):
- fname = stack[n].filename
+ # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
+ frame = inspect.currentframe()
+ n = 0
+ while frame:
+ fname = inspect.getfile(frame)
if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
- continue
+ frame = frame.f_back
+ n += 1
else:
break
return n
| Backport PR #45247: PERF: find_stack_level | https://api.github.com/repos/pandas-dev/pandas/pulls/46881 | 2022-04-27T03:45:59Z | 2022-04-27T12:31:44Z | 2022-04-27T12:31:44Z | 2022-04-27T12:59:44Z |
REF: libhashtable.mode support mask | diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi
index 481ff0d36c460..5c7be5e660fd9 100644
--- a/pandas/_libs/hashtable.pyi
+++ b/pandas/_libs/hashtable.pyi
@@ -197,10 +197,13 @@ def duplicated(
values: np.ndarray,
keep: Literal["last", "first", False] = ...,
) -> npt.NDArray[np.bool_]: ...
-def mode(values: np.ndarray, dropna: bool) -> np.ndarray: ...
+def mode(
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
+) -> np.ndarray: ...
def value_count(
values: np.ndarray,
dropna: bool,
+ mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.int64],]: ... # np.ndarray[same-as-values]
# arr and values should have same dtype
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index 11a45bb194c03..f7c41b32864be 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -31,9 +31,9 @@ dtypes = [('Complex128', 'complex128', 'complex128',
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna):
+cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, const uint8_t[:] mask=None):
{{else}}
-cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
+cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna, const uint8_t[:] mask=None):
{{endif}}
cdef:
Py_ssize_t i = 0
@@ -46,6 +46,11 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
{{c_type}} val
int ret = 0
+ bint uses_mask = mask is not None
+ bint isna_entry = False
+
+ if uses_mask and not dropna:
+ raise NotImplementedError("uses_mask not implemented with dropna=False")
# we track the order in which keys are first seen (GH39009),
# khash-map isn't insertion-ordered, thus:
@@ -56,6 +61,9 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
table = kh_init_{{ttype}}()
{{if dtype == 'object'}}
+ if uses_mask:
+ raise NotImplementedError("uses_mask not implemented with object dtype")
+
kh_resize_{{ttype}}(table, n // 10)
for i in range(n):
@@ -74,7 +82,13 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
for i in range(n):
val = {{to_c_type}}(values[i])
- if not is_nan_{{c_type}}(val) or not dropna:
+ if dropna:
+ if uses_mask:
+ isna_entry = mask[i]
+ else:
+ isna_entry = is_nan_{{c_type}}(val)
+
+ if not dropna or not isna_entry:
k = kh_get_{{ttype}}(table, val)
if k != table.n_buckets:
table.vals[k] += 1
@@ -251,37 +265,37 @@ ctypedef fused htfunc_t:
complex64_t
-cpdef value_count(ndarray[htfunc_t] values, bint dropna):
+cpdef value_count(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None):
if htfunc_t is object:
- return value_count_object(values, dropna)
+ return value_count_object(values, dropna, mask=mask)
elif htfunc_t is int8_t:
- return value_count_int8(values, dropna)
+ return value_count_int8(values, dropna, mask=mask)
elif htfunc_t is int16_t:
- return value_count_int16(values, dropna)
+ return value_count_int16(values, dropna, mask=mask)
elif htfunc_t is int32_t:
- return value_count_int32(values, dropna)
+ return value_count_int32(values, dropna, mask=mask)
elif htfunc_t is int64_t:
- return value_count_int64(values, dropna)
+ return value_count_int64(values, dropna, mask=mask)
elif htfunc_t is uint8_t:
- return value_count_uint8(values, dropna)
+ return value_count_uint8(values, dropna, mask=mask)
elif htfunc_t is uint16_t:
- return value_count_uint16(values, dropna)
+ return value_count_uint16(values, dropna, mask=mask)
elif htfunc_t is uint32_t:
- return value_count_uint32(values, dropna)
+ return value_count_uint32(values, dropna, mask=mask)
elif htfunc_t is uint64_t:
- return value_count_uint64(values, dropna)
+ return value_count_uint64(values, dropna, mask=mask)
elif htfunc_t is float64_t:
- return value_count_float64(values, dropna)
+ return value_count_float64(values, dropna, mask=mask)
elif htfunc_t is float32_t:
- return value_count_float32(values, dropna)
+ return value_count_float32(values, dropna, mask=mask)
elif htfunc_t is complex128_t:
- return value_count_complex128(values, dropna)
+ return value_count_complex128(values, dropna, mask=mask)
elif htfunc_t is complex64_t:
- return value_count_complex64(values, dropna)
+ return value_count_complex64(values, dropna, mask=mask)
else:
raise TypeError(values.dtype)
@@ -361,7 +375,7 @@ cpdef ismember(ndarray[htfunc_t] arr, ndarray[htfunc_t] values):
@cython.wraparound(False)
@cython.boundscheck(False)
-def mode(ndarray[htfunc_t] values, bint dropna):
+def mode(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None):
# TODO(cython3): use const htfunct_t[:]
cdef:
@@ -372,7 +386,7 @@ def mode(ndarray[htfunc_t] values, bint dropna):
int64_t count, max_count = -1
Py_ssize_t nkeys, k, j = 0
- keys, counts = value_count(values, dropna)
+ keys, counts = value_count(values, dropna, mask=mask)
nkeys = len(keys)
modes = np.empty(nkeys, dtype=values.dtype)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 0c0b93f41c657..112c401500472 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -858,12 +858,15 @@ def value_counts(
# Called once from SparseArray, otherwise could be private
-def value_counts_arraylike(values: np.ndarray, dropna: bool):
+def value_counts_arraylike(
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
+):
"""
Parameters
----------
values : np.ndarray
dropna : bool
+ mask : np.ndarray[bool] or None, default None
Returns
-------
@@ -873,7 +876,7 @@ def value_counts_arraylike(values: np.ndarray, dropna: bool):
original = values
values = _ensure_data(values)
- keys, counts = htable.value_count(values, dropna)
+ keys, counts = htable.value_count(values, dropna, mask=mask)
if needs_i8_conversion(original.dtype):
# datetime, timedelta, or period
@@ -911,7 +914,9 @@ def duplicated(
return htable.duplicated(values, keep=keep)
-def mode(values: ArrayLike, dropna: bool = True) -> ArrayLike:
+def mode(
+ values: ArrayLike, dropna: bool = True, mask: npt.NDArray[np.bool_] | None = None
+) -> ArrayLike:
"""
Returns the mode(s) of an array.
@@ -937,7 +942,7 @@ def mode(values: ArrayLike, dropna: bool = True) -> ArrayLike:
values = _ensure_data(values)
- npresult = htable.mode(values, dropna=dropna)
+ npresult = htable.mode(values, dropna=dropna, mask=mask)
try:
npresult = np.sort(npresult)
except TypeError as err:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index eca7a205983ef..01a04b7aa63d9 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -26,7 +26,6 @@
from pandas._libs import (
NaT,
algos as libalgos,
- hashtable as htable,
lib,
)
from pandas._libs.arrays import NDArrayBacked
@@ -2255,14 +2254,15 @@ def mode(self, dropna: bool = True) -> Categorical:
def _mode(self, dropna: bool = True) -> Categorical:
codes = self._codes
+ mask = None
if dropna:
- good = self._codes != -1
- codes = self._codes[good]
+ mask = self.isna()
- codes = htable.mode(codes, dropna)
- codes.sort()
- codes = coerce_indexer_dtype(codes, self.dtype.categories)
- return self._from_backing_data(codes)
+ res_codes = algorithms.mode(codes, mask=mask)
+ res_codes = cast(np.ndarray, res_codes)
+ assert res_codes.dtype == codes.dtype
+ res = self._from_backing_data(res_codes)
+ return res
# ------------------------------------------------------------------
# ExtensionArray Interface
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 95363e598a06c..5ae71b305ac60 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -57,6 +57,7 @@
)
from pandas.core import (
+ algorithms as algos,
arraylike,
missing,
nanops,
@@ -907,6 +908,15 @@ def value_counts(self, dropna: bool = True) -> Series:
)
from pandas.arrays import IntegerArray
+ if dropna:
+ keys, counts = algos.value_counts_arraylike(
+ self._data, dropna=True, mask=self._mask
+ )
+ res = Series(counts, index=keys)
+ res.index = res.index.astype(self.dtype)
+ res = res.astype("Int64")
+ return res
+
# compute counts on the data with no nans
data = self._data[~self._mask]
value_counts = Index(data).value_counts()
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46880 | 2022-04-26T23:32:51Z | 2022-04-27T12:32:25Z | 2022-04-27T12:32:25Z | 2022-04-27T14:56:08Z |
Merge nonstring columns | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 55bfb044fb31d..5397b1763242d 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -154,6 +154,8 @@ Other enhancements
- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`)
- :class:`DataError`, :class:`SpecificationError`, :class:`SettingWithCopyError`, :class:`SettingWithCopyWarning`, and :class:`NumExprClobberingError` are now exposed in ``pandas.errors`` (:issue:`27656`)
- Added ``check_like`` argument to :func:`testing.assert_series_equal` (:issue:`47247`)
+- :func:`merge` and :meth:`DataFrame.merge` now allows passing ``None`` or ``(None, None)`` for ``suffixes`` argument, keeping column labels unchanged in the resulting :class:`DataFrame` potentially with duplicate column labels (:issue:`46885`)
+- :func:`DataFrame.join` now allows passing empty string for ``lsuffix`` and ``rsuffix`` arguments, keeping column labels unchanged in the resulting :class:`DataFrame` potentially with duplicate column labels (:issue:`46885`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.notable_bug_fixes:
diff --git a/pandas/_typing.py b/pandas/_typing.py
index a85820a403fde..afa59e5485f62 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -111,7 +111,7 @@
IndexLabel = Union[Hashable, Sequence[Hashable]]
Level = Union[Hashable, int]
Shape = Tuple[int, ...]
-Suffixes = Tuple[Optional[str], Optional[str]]
+Suffixes = Optional[Tuple[Optional[str], Optional[str]]]
Ordered = Optional[bool]
JSONSerializable = Optional[Union[PythonScalar, List, Dict]]
Frequency = Union[str, "DateOffset"]
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4376c784bc847..0ada4db7ef5dc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -322,13 +322,13 @@
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
-suffixes : list-like, default is ("_x", "_y")
- A length-2 sequence where each element is optionally a string
+suffixes : optional list-like, default is ("_x", "_y")
+ An optional length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
- `right` should be left as-is, with no suffix. At least one of the
- values must not be None.
+ `right` should be left as-is, with no suffix. Pass `None` to keep
+ both columns labels as-is.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
@@ -412,14 +412,17 @@
4 bar 2 bar 6
5 baz 3 baz 7
-Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
-any overlapping columns.
-
->>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
-Traceback (most recent call last):
-...
-ValueError: columns overlap but no suffix specified:
- Index(['value'], dtype='object')
+Merge DataFrames df1 and df2 with null as suffix will keep
+the original columns names
+
+>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=None)
+ lkey value rkey value
+0 foo 1 foo 5
+1 foo 1 foo 8
+2 foo 5 foo 5
+3 foo 5 foo 8
+4 bar 2 bar 6
+5 baz 3 baz 7
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 4227d43c459d0..1b4da0a8774cc 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2359,7 +2359,7 @@ def _items_overlap_with_suffix(
If corresponding suffix is empty, the entry is simply converted to string.
"""
- if not is_list_like(suffixes, allow_sets=False):
+ if not (is_list_like(suffixes, allow_sets=False) or suffixes is None):
warnings.warn(
f"Passing 'suffixes' as a {type(suffixes)}, is not supported and may give "
"unexpected results. Provide 'suffixes' as a tuple instead. In the "
@@ -2372,10 +2372,7 @@ def _items_overlap_with_suffix(
if len(to_rename) == 0:
return left, right
- lsuffix, rsuffix = suffixes
-
- if not lsuffix and not rsuffix:
- raise ValueError(f"columns overlap but no suffix specified: {to_rename}")
+ lsuffix, rsuffix = suffixes if suffixes else (None, None)
def renamer(x, suffix):
"""
@@ -2386,12 +2383,12 @@ def renamer(x, suffix):
Parameters
----------
- x : original column name
+ x : original column label
suffix : str or None
Returns
-------
- x : renamed column name
+ x : renamed column label
"""
if x in to_rename and suffix is not None:
return f"{x}{suffix}"
diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py
index 0935856fb223a..fb8da70bcd2d6 100644
--- a/pandas/tests/frame/methods/test_join.py
+++ b/pandas/tests/frame/methods/test_join.py
@@ -280,12 +280,6 @@ def test_join_index(float_frame):
with pytest.raises(ValueError, match="join method"):
f.join(f2, how="foo")
- # corner case - overlapping columns
- msg = "columns overlap but no suffix"
- for how in ("outer", "left", "inner"):
- with pytest.raises(ValueError, match=msg):
- float_frame.join(float_frame, how=how)
-
def test_join_index_more(float_frame):
af = float_frame.loc[:, ["A", "B"]]
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index ccdfc3cd23790..b2bce22931643 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -2209,6 +2209,10 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
(0, 0, {"suffixes": ("_a", None)}, ["0_a", 0]),
("a", "a", {}, ["a_x", "a_y"]),
(0, 0, {}, ["0_x", "0_y"]),
+ (0, 0, {"suffixes": None}, [0, 0]),
+ (0, 0, {"suffixes": (None, None)}, [0, 0]),
+ ("a", "a", {"suffixes": None}, ["a", "a"]),
+ ("a", "a", {"suffixes": (None, None)}, ["a", "a"]),
],
)
def test_merge_suffix(col1, col2, kwargs, expected_cols):
@@ -2255,21 +2259,6 @@ def test_merge_duplicate_suffix(how, expected):
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize(
- "col1, col2, suffixes",
- [("a", "a", (None, None)), ("a", "a", ("", None)), (0, 0, (None, ""))],
-)
-def test_merge_suffix_error(col1, col2, suffixes):
- # issue: 24782
- a = DataFrame({col1: [1, 2, 3]})
- b = DataFrame({col2: [3, 4, 5]})
-
- # TODO: might reconsider current raise behaviour, see issue 24782
- msg = "columns overlap but no suffix specified"
- with pytest.raises(ValueError, match=msg):
- merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
-
-
@pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}])
def test_merge_suffix_warns(suffixes):
a = DataFrame({"a": [1, 2, 3]})
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index 0dbe45eeb1e82..8b2a43e6512e1 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -636,12 +636,6 @@ def test_join_multi_levels_invalid(self, portfolio, household):
):
household.join(portfolio, how="inner")
- portfolio2 = portfolio.copy()
- portfolio2.index.set_names(["household_id", "foo"])
-
- with pytest.raises(ValueError, match="columns overlap but no suffix specified"):
- portfolio2.join(portfolio, how="inner")
-
def test_join_multi_levels2(self):
# some more advanced merges
| - [X] closes https://github.com/pandas-dev/pandas/issues/46885
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46879 | 2022-04-26T20:05:57Z | 2022-08-09T01:29:22Z | null | 2022-08-09T01:29:22Z |
PERF: use C version of np.empty | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 5094f6f07d534..13bd95004445d 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -873,7 +873,7 @@ def get_level_sorter(
"""
cdef:
Py_ssize_t i, l, r
- ndarray[intp_t, ndim=1] out = np.empty(len(codes), dtype=np.intp)
+ ndarray[intp_t, ndim=1] out = cnp.PyArray_EMPTY(1, codes.shape, cnp.NPY_INTP, 0)
for i in range(len(starts) - 1):
l, r = starts[i], starts[i + 1]
@@ -2255,11 +2255,11 @@ def maybe_convert_numeric(
int status, maybe_int
Py_ssize_t i, n = values.size
Seen seen = Seen(coerce_numeric)
- ndarray[float64_t, ndim=1] floats = np.empty(n, dtype='f8')
- ndarray[complex128_t, ndim=1] complexes = np.empty(n, dtype='c16')
- ndarray[int64_t, ndim=1] ints = np.empty(n, dtype='i8')
- ndarray[uint64_t, ndim=1] uints = np.empty(n, dtype='u8')
- ndarray[uint8_t, ndim=1] bools = np.empty(n, dtype='u1')
+ ndarray[float64_t, ndim=1] floats = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_FLOAT64, 0)
+ ndarray[complex128_t, ndim=1] complexes = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_COMPLEX128, 0)
+ ndarray[int64_t, ndim=1] ints = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_INT64, 0)
+ ndarray[uint64_t, ndim=1] uints = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_UINT64, 0)
+ ndarray[uint8_t, ndim=1] bools = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_UINT8, 0)
ndarray[uint8_t, ndim=1] mask = np.zeros(n, dtype="u1")
float64_t fval
bint allow_null_in_int = convert_to_masked_nullable
@@ -2479,11 +2479,11 @@ def maybe_convert_objects(ndarray[object] objects,
n = len(objects)
- floats = np.empty(n, dtype='f8')
- complexes = np.empty(n, dtype='c16')
- ints = np.empty(n, dtype='i8')
- uints = np.empty(n, dtype='u8')
- bools = np.empty(n, dtype=np.uint8)
+ floats = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_FLOAT64, 0)
+ complexes = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_COMPLEX128, 0)
+ ints = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_INT64, 0)
+ uints = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_UINT64, 0)
+ bools = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_UINT8, 0)
mask = np.full(n, False)
if convert_datetime:
@@ -2785,7 +2785,7 @@ cdef _infer_all_nats(dtype, ndarray datetimes, ndarray timedeltas):
else:
# ExtensionDtype
cls = dtype.construct_array_type()
- i8vals = np.empty(len(datetimes), dtype="i8")
+ i8vals = cnp.PyArray_EMPTY(1, datetimes.shape, cnp.NPY_INT64, 0)
i8vals.fill(NPY_NAT)
result = cls(i8vals, dtype=dtype)
return result
@@ -2888,7 +2888,7 @@ def map_infer(
object val
n = len(arr)
- result = np.empty(n, dtype=object)
+ result = cnp.PyArray_EMPTY(1, arr.shape, cnp.NPY_OBJECT, 0)
for i in range(n):
if ignore_na and checknull(arr[i]):
result[i] = arr[i]
@@ -3083,7 +3083,7 @@ cpdef ndarray eq_NA_compat(ndarray[object] arr, object key):
key is assumed to have `not isna(key)`
"""
cdef:
- ndarray[uint8_t, cast=True] result = np.empty(len(arr), dtype=bool)
+ ndarray[uint8_t, cast=True] result = cnp.PyArray_EMPTY(arr.ndim, arr.shape, cnp.NPY_BOOL, 0)
Py_ssize_t i
object item
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index c96a65cdff525..f4d495de26600 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -123,7 +123,7 @@ def format_array_from_datetime(
ndarray[int64_t] consider_values
bint show_ms = False, show_us = False, show_ns = False
bint basic_format = False
- ndarray[object] result = np.empty(N, dtype=object)
+ ndarray[object] result = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
object ts, res
npy_datetimestruct dts
@@ -349,7 +349,7 @@ def array_with_unit_to_datetime(
# and are in ignore mode
# redo as object
- oresult = np.empty(n, dtype=object)
+ oresult = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
for i in range(n):
val = values[i]
@@ -668,7 +668,7 @@ cdef ndarray[object] ignore_errors_out_of_bounds_fallback(ndarray[object] values
Py_ssize_t i, n = len(values)
object val
- oresult = np.empty(n, dtype=object)
+ oresult = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
for i in range(n):
val = values[i]
@@ -730,7 +730,7 @@ cdef _array_to_datetime_object(
assert is_raise or is_ignore or is_coerce
- oresult = np.empty(n, dtype=object)
+ oresult = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
# We return an object array and only attempt to parse:
# 1) NaT or NaT-like values
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 3d04562cb73c3..c2637db23293e 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1057,7 +1057,7 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end):
cdef:
Py_ssize_t n = len(arr)
Py_ssize_t increment = arr.strides[0] // 8
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
+ ndarray[int64_t] result = cnp.PyArray_EMPTY(arr.ndim, arr.shape, cnp.NPY_INT64, 0)
_period_asfreq(
<int64_t*>cnp.PyArray_DATA(arr),
@@ -1440,7 +1440,7 @@ def extract_ordinals(ndarray values, freq) -> np.ndarray:
cdef:
Py_ssize_t i, n = values.size
int64_t ordinal
- ndarray ordinals = np.empty((<object>values).shape, dtype=np.int64)
+ ndarray ordinals = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_INT64, 0)
cnp.broadcast mi = cnp.PyArray_MultiIterNew2(ordinals, values)
object p
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46878 | 2022-04-26T18:16:07Z | 2022-04-27T12:35:40Z | 2022-04-27T12:35:40Z | 2022-04-27T14:55:56Z |
BUG: extra row created when calling to_excel for multi-index columns | diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index d0fea32cafe26..d011f0f0a9ff4 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -705,8 +705,8 @@ def _format_regular_rows(self) -> Iterable[ExcelCell]:
else:
index_label = self.df.index.names[0]
- if isinstance(self.columns, MultiIndex):
- self.rowcounter += 1
+ # if isinstance(self.columns, MultiIndex):
+ # self.rowcounter += 1
if index_label and self.header is not False:
yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
When multi-level columns are written into excel, use to_ Excel method, there will be an extra row in the saved xlsx file

I tried to fix and welcome to comment for this update.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46875 | 2022-04-26T10:16:43Z | 2022-06-23T22:22:43Z | null | 2022-06-23T22:22:43Z |
TST: Fix flaky xfail condition typo | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 852e85968d43f..910449d98bcc5 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -840,7 +840,7 @@ def test_basic_series_frame_alignment(
and parser == "pandas"
and index_name == "index"
and r_idx_type == "i"
- and c_idx_type == "c"
+ and c_idx_type == "s"
):
reason = (
f"Flaky column ordering when engine={engine}, "
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
This flaky test resurfaced in https://github.com/pandas-dev/pandas/runs/6152914367?check_suite_focus=true but that was due to a typo in the if condition in https://github.com/pandas-dev/pandas/pull/46796
| https://api.github.com/repos/pandas-dev/pandas/pulls/46871 | 2022-04-25T19:57:55Z | 2022-04-26T00:22:29Z | 2022-04-26T00:22:29Z | 2022-04-26T01:15:58Z |
DOC: fix typo in docstrings | diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index dda0d9549e7b3..ab42fcd92a3d9 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -1146,7 +1146,7 @@ def assert_frame_equal(
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
- differences. Is is mostly intended for use in unit tests.
+ differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
| Correct a spelling mistake in docstrings of `pandas.testing.assert_frame_equal`.
Before
```python: before
Is is mostly intended for use in unit tests.
```
After
```
It is mostly intended for use in unit tests.
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/46866 | 2022-04-25T13:13:39Z | 2022-04-25T13:23:37Z | 2022-04-25T13:23:37Z | 2022-04-25T13:23:37Z |
BUG: Raise error when expr does not evaluate to bool in df.query | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index ba1c610decc7f..d00aa6d54687d 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -810,6 +810,7 @@ Indexing
- Bug in :meth:`NDFrame.xs`, :meth:`DataFrame.iterrows`, :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` not always propagating metadata (:issue:`28283`)
- Bug in :meth:`DataFrame.sum` min_count changes dtype if input contains NaNs (:issue:`46947`)
- Bug in :class:`IntervalTree` that lead to an infinite recursion. (:issue:`46658`)
+- Bug in :meth:`DataFrame.query` doesn't follow python truthy-ness (:issue:`8560`)
-
Missing
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 44d79fe2f1519..1bcceaeccf108 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4188,14 +4188,19 @@ def query(self, expr: str, inplace: bool = False, **kwargs):
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
-
- try:
- result = self.loc[res]
- except ValueError:
+ is_res_unidimensional = res.ndim == 1
+ if is_res_unidimensional:
+ is_bool_result = is_bool_dtype(res)
+ res_accesser = self.loc
+ else:
+ is_bool_result = all(is_bool_dtype(x) for x in res.dtypes)
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
- result = self[res]
-
+ res_accesser = self
+ if not is_bool_result:
+ msg = f"expr must evaluate to boolean not {res.dtypes}"
+ raise ValueError(msg)
+ result = res_accesser[res]
if inplace:
self._update_inplace(result)
return None
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index fe3b04e8e27e6..b914c89d820c4 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -35,6 +35,18 @@ def skip_if_no_pandas_parser(parser):
class TestCompat:
+ def test_query_non_boolean_raise_error(self):
+ df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"])
+ msg = "expr must evaluate to boolean not"
+ with pytest.raises(ValueError, match=msg):
+ df.query("cat")
+
+ def test_query_non_boolean_raise_error_ndim2(self):
+ df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"])
+ msg = "expr must evaluate to boolean not"
+ with pytest.raises(ValueError, match=msg):
+ df.query("@df")
+
def setup_method(self):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
| - [X] closes #8560
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46862 | 2022-04-25T01:34:28Z | 2022-07-22T17:48:39Z | null | 2022-07-22T17:48:39Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.