title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
PERF: needs_i8_conversion expect dtype object
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 52606cd7a914e..d7b1741687441 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1090,22 +1090,20 @@ def is_numeric_v_string_like(a: ArrayLike, b) -> bool: ) -def needs_i8_conversion(arr_or_dtype) -> bool: +def needs_i8_conversion(dtype: DtypeObj | None) -> bool: """ - Check whether the array or dtype should be converted to int64. + Check whether the dtype should be converted to int64. - An array-like or dtype "needs" such a conversion if the array-like - or dtype is of a datetime-like dtype + Dtype "needs" such a conversion if the dtype is of a datetime-like dtype Parameters ---------- - arr_or_dtype : array-like or dtype - The array or dtype to check. + dtype : np.dtype, ExtensionDtype, or None Returns ------- boolean - Whether or not the array or dtype should be converted to int64. + Whether or not the dtype should be converted to int64. Examples -------- @@ -1114,30 +1112,27 @@ def needs_i8_conversion(arr_or_dtype) -> bool: >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) + False + >>> needs_i8_conversion(np.dtype(np.datetime64)) True >>> needs_i8_conversion(np.array(['a', 'b'])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) - True + False >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) + False + >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype) True """ - if arr_or_dtype is None: - return False - if isinstance(arr_or_dtype, np.dtype): - return arr_or_dtype.kind in ["m", "M"] - elif isinstance(arr_or_dtype, ExtensionDtype): - return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype)) - - try: - dtype = get_dtype(arr_or_dtype) - except (TypeError, ValueError): + if dtype is None: return False if isinstance(dtype, np.dtype): - return dtype.kind in ["m", "M"] - return isinstance(dtype, (PeriodDtype, 
DatetimeTZDtype)) + return dtype.kind in "mM" + elif isinstance(dtype, ExtensionDtype): + return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) + return False def is_numeric_dtype(arr_or_dtype) -> bool: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6d9b2327ff72e..f9ce0fa1e6ee4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -960,9 +960,7 @@ def view(self, cls=None): if isinstance(cls, str): dtype = pandas_dtype(cls) - if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( - dtype - ): + if needs_i8_conversion(dtype): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 14cf5f317ed5a..38aa0d97f9c8a 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2015,7 +2015,7 @@ def _get_merge_keys( f"with type {repr(lt.dtype)}" ) - if needs_i8_conversion(lt): + if needs_i8_conversion(getattr(lt, "dtype", None)): if not isinstance(self.tolerance, datetime.timedelta): raise MergeError(msg) if self.tolerance < Timedelta(0): @@ -2101,7 +2101,7 @@ def injection(obj): raise ValueError(f"{side} keys must be sorted") # initial type conversion as needed - if needs_i8_conversion(left_values): + if needs_i8_conversion(getattr(left_values, "dtype", None)): if tolerance is not None: tolerance = Timedelta(tolerance) diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 9c11bff8862c1..0fe8376baeb19 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -518,9 +518,12 @@ def test_needs_i8_conversion(): assert not com.needs_i8_conversion(pd.Series([1, 2])) assert not com.needs_i8_conversion(np.array(["a", "b"])) - assert com.needs_i8_conversion(np.datetime64) - assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) - assert com.needs_i8_conversion(pd.DatetimeIndex(["2000"], 
tz="US/Eastern")) + assert not com.needs_i8_conversion(np.datetime64) + assert com.needs_i8_conversion(np.dtype(np.datetime64)) + assert not com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) + assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]").dtype) + assert not com.needs_i8_conversion(pd.DatetimeIndex(["2000"], tz="US/Eastern")) + assert com.needs_i8_conversion(pd.DatetimeIndex(["2000"], tz="US/Eastern").dtype) def test_is_numeric_dtype():
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52288
2023-03-29T22:26:50Z
2023-03-30T16:52:12Z
2023-03-30T16:52:12Z
2023-03-30T17:09:20Z
DOC: Escape backslash in read_clipboard docstring
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index e5981e8d15eb7..c25e184680f44 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -30,8 +30,8 @@ def read_clipboard( Parameters ---------- - sep : str, default '\s+' - A string or regex delimiter. The default of '\s+' denotes + sep : str, default '\\s+' + A string or regex delimiter. The default of '\\s+' denotes one or more whitespace characters. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
- [x] closes #51868 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52285
2023-03-29T20:32:50Z
2023-03-29T23:47:27Z
2023-03-29T23:47:26Z
2023-03-29T23:53:22Z
Backport PR #52022 on branch 2.0.x (API / CoW: Copy arrays by default in Series constructor)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 3c3c490d2c468..d9495d843d939 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -193,12 +193,13 @@ Copy-on-Write improvements - The :class:`DataFrame` constructor, when constructing a DataFrame from a :class:`Series` and specifying ``copy=False``, will now respect Copy-on-Write. -- The :class:`DataFrame` constructor, when constructing from a NumPy array, - will now copy the array by default to avoid mutating the :class:`DataFrame` +- The :class:`DataFrame` and :class:`Series` constructors, when constructing from + a NumPy array, will now copy the array by default to avoid mutating + the :class:`DataFrame` / :class:`Series` when mutating the array. Specify ``copy=False`` to get the old behavior. When setting ``copy=False`` pandas does not guarantee correct Copy-on-Write behavior when the NumPy array is modified after creation of the - :class:`DataFrame`. + :class:`DataFrame` / :class:`Series`. - The :meth:`DataFrame.from_records` will now respect Copy-on-Write when called with a :class:`DataFrame`. 
diff --git a/pandas/conftest.py b/pandas/conftest.py index 1aaad070dd313..8a9ba5f708d1c 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -739,7 +739,7 @@ def _create_series(index): """Helper for the _series dict""" size = len(index) data = np.random.randn(size) - return Series(data, index=index, name="a") + return Series(data, index=index, name="a", copy=False) _series = { diff --git a/pandas/core/series.py b/pandas/core/series.py index d747a36002ffd..475920da20e88 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -88,6 +88,7 @@ validate_percentile, ) +from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( LossySetitemError, convert_dtypes, @@ -370,14 +371,14 @@ def __init__( index=None, dtype: Dtype | None = None, name=None, - copy: bool = False, + copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None - and copy is False + and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) @@ -390,6 +391,13 @@ def __init__( self.name = name return + if isinstance(data, (ExtensionArray, np.ndarray)): + if copy is not False and using_copy_on_write(): + if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): + data = data.copy() + if copy is None: + copy = False + # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py index ee9e1dbc81e12..d38f0b8719de0 100644 --- a/pandas/tests/arrays/categorical/test_replace.py +++ b/pandas/tests/arrays/categorical/test_replace.py @@ -60,7 +60,7 @@ def test_replace_categorical(to_replace, value, result, expected_error_msg): # GH#26988 cat = Categorical(["a", "b"]) expected = Categorical(result) - result = pd.Series(cat).replace(to_replace, value)._values + result = 
pd.Series(cat, copy=False).replace(to_replace, value)._values tm.assert_categorical_equal(result, expected) if to_replace == "b": # the "c" test is supposed to be unchanged @@ -68,7 +68,7 @@ def test_replace_categorical(to_replace, value, result, expected_error_msg): # ensure non-inplace call does not affect original tm.assert_categorical_equal(cat, expected) - pd.Series(cat).replace(to_replace, value, inplace=True) + pd.Series(cat, copy=False).replace(to_replace, value, inplace=True) tm.assert_categorical_equal(cat, expected) diff --git a/pandas/tests/copy_view/test_astype.py b/pandas/tests/copy_view/test_astype.py index 47477b9e2c79d..dd4310c19aaff 100644 --- a/pandas/tests/copy_view/test_astype.py +++ b/pandas/tests/copy_view/test_astype.py @@ -200,7 +200,9 @@ def test_astype_arrow_timestamp(using_copy_on_write): result = df.astype("timestamp[ns][pyarrow]") if using_copy_on_write: assert not result._mgr._has_no_reference(0) - assert np.shares_memory(get_array(df, "a"), get_array(result, "a")._data) + # TODO(CoW): arrow is not setting copy=False in the Series constructor + # under the hood + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")._data) def test_convert_dtypes_infer_objects(using_copy_on_write): diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py index 76b281b9fad68..2f2548ad0f238 100644 --- a/pandas/tests/copy_view/test_constructors.py +++ b/pandas/tests/copy_view/test_constructors.py @@ -30,7 +30,7 @@ def test_series_from_series(dtype, using_copy_on_write): result = Series(ser, dtype=dtype) # the shallow copy still shares memory - assert np.shares_memory(ser.values, result.values) + assert np.shares_memory(get_array(ser), get_array(result)) if using_copy_on_write: assert result._mgr.blocks[0].refs.has_reference() @@ -40,13 +40,13 @@ def test_series_from_series(dtype, using_copy_on_write): result.iloc[0] = 0 assert ser.iloc[0] == 1 # mutating triggered a copy-on-write -> no longer 
shares memory - assert not np.shares_memory(ser.values, result.values) + assert not np.shares_memory(get_array(ser), get_array(result)) else: # mutating shallow copy does mutate original result.iloc[0] = 0 assert ser.iloc[0] == 0 # and still shares memory - assert np.shares_memory(ser.values, result.values) + assert np.shares_memory(get_array(ser), get_array(result)) # the same when modifying the parent result = Series(ser, dtype=dtype) @@ -90,6 +90,38 @@ def test_series_from_series_with_reindex(using_copy_on_write): assert not result._mgr.blocks[0].refs.has_reference() +@pytest.mark.parametrize("fastpath", [False, True]) +@pytest.mark.parametrize("dtype", [None, "int64"]) +@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)]) +@pytest.mark.parametrize( + "arr", [np.array([1, 2, 3], dtype="int64"), pd.array([1, 2, 3], dtype="Int64")] +) +def test_series_from_array(using_copy_on_write, idx, dtype, fastpath, arr): + if idx is None or dtype is not None: + fastpath = False + ser = Series(arr, dtype=dtype, index=idx, fastpath=fastpath) + ser_orig = ser.copy() + data = getattr(arr, "_data", arr) + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), data) + else: + assert np.shares_memory(get_array(ser), data) + + arr[0] = 100 + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + else: + expected = Series([100, 2, 3], dtype=dtype if dtype is not None else arr.dtype) + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize("copy", [True, False, None]) +def test_series_from_array_different_dtype(using_copy_on_write, copy): + arr = np.array([1, 2, 3], dtype="int64") + ser = Series(arr, dtype="int32", copy=copy) + assert not np.shares_memory(get_array(ser), arr) + + @pytest.mark.parametrize( "idx", [ diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py index 29766ff392296..1f85c89ef38be 100644 --- a/pandas/tests/extension/base/constructors.py +++ 
b/pandas/tests/extension/base/constructors.py @@ -22,7 +22,7 @@ def test_array_from_scalars(self, data): assert isinstance(result, type(data)) def test_series_constructor(self, data): - result = pd.Series(data) + result = pd.Series(data, copy=False) assert result.dtype == data.dtype assert len(result) == len(data) if hasattr(result._mgr, "blocks"): diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index ca867ffb77296..5e4fdfece1596 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -254,7 +254,7 @@ def test_fillna_copy_frame(self, data_missing): def test_fillna_copy_series(self, data_missing): arr = data_missing.take([1, 1]) - ser = pd.Series(arr) + ser = pd.Series(arr, copy=False) ser_orig = ser.copy() filled_val = ser[0] diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 2aeb2af567ea0..b92df30fc5c7f 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -288,7 +288,7 @@ def test_fillna_copy_frame(self, data_missing, using_copy_on_write): def test_fillna_copy_series(self, data_missing, using_copy_on_write): arr = data_missing.take([1, 1]) - ser = pd.Series(arr) + ser = pd.Series(arr, copy=False) filled_val = ser[0] result = ser.fillna(filled_val) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index f60c38c041fcf..2496766eae034 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1362,7 +1362,7 @@ def check_can_hold_element(self, obj, elem, inplace: bool): def check_series_setitem(self, elem, index: Index, inplace: bool): arr = index._data.copy() - ser = Series(arr) + ser = Series(arr, copy=False) self.check_can_hold_element(ser, elem, inplace) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index fae29c124df71..0d84ecf955700 100644 --- 
a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -2812,7 +2812,7 @@ def __getitem__(self, ix): def dtype(self): return DtypeStub() - series = Series(ExtTypeStub()) + series = Series(ExtTypeStub(), copy=False) res = repr(series) # This line crashed before #33770 was fixed. expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub" assert res == expected diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py index dd6aef04a2e6a..70cb173483692 100644 --- a/pandas/tests/reductions/test_stat_reductions.py +++ b/pandas/tests/reductions/test_stat_reductions.py @@ -70,7 +70,7 @@ def test_td64_mean(self, box): tdi = pd.TimedeltaIndex([0, 3, -2, -7, 1, 2, -1, 3, 5, -2, 4], unit="D") tdarr = tdi._data - obj = box(tdarr) + obj = box(tdarr, copy=False) result = obj.mean() expected = np.array(tdarr).mean() diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 636ebfded026d..39cbf2b7bac10 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -578,7 +578,7 @@ def test_setitem_scalar_into_readonly_backing_data(): array = np.zeros(5) array.flags.writeable = False # make the array immutable - series = Series(array) + series = Series(array, copy=False) for n in series.index: msg = "assignment destination is read-only" @@ -593,7 +593,7 @@ def test_setitem_slice_into_readonly_backing_data(): array = np.zeros(5) array.flags.writeable = False # make the array immutable - series = Series(array) + series = Series(array, copy=False) msg = "assignment destination is read-only" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 1ec8d990add3a..2d91a4ef6c58e 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -527,7 +527,7 @@ def 
test_categorical_sideeffects_free(self): # however, copy is False by default # so this WILL change values cat = Categorical(["a", "b", "c", "a"]) - s = Series(cat) + s = Series(cat, copy=False) assert s.values is cat s = s.cat.rename_categories([1, 2, 3]) assert s.values is not cat
#52022
https://api.github.com/repos/pandas-dev/pandas/pulls/52282
2023-03-29T16:43:54Z
2023-03-29T19:15:50Z
2023-03-29T19:15:50Z
2023-03-29T19:19:36Z
BUG: mean/median with strings
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index efc8bc695df85..af1e86021ef19 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -311,8 +311,11 @@ Timezones Numeric ^^^^^^^ +- Bug in :meth:`Series.corr` and :meth:`Series.cov` raising ``AttributeError`` for masked dtypes (:issue:`51422`) +- Bug in :meth:`Series.mean`, :meth:`DataFrame.mean` with object-dtype values containing strings that can be converted to numbers (e.g. "2") returning incorrect numeric results; these now raise ``TypeError`` (:issue:`36703`, :issue:`44008`) - Bug in :meth:`DataFrame.corrwith` raising ``NotImplementedError`` for pyarrow-backed dtypes (:issue:`52314`) - Bug in :meth:`Series.corr` and :meth:`Series.cov` raising ``AttributeError`` for masked dtypes (:issue:`51422`) +- Bug in :meth:`Series.median` and :meth:`DataFrame.median` with object-dtype values containing strings that can be converted to numbers (e.g. "2") returning incorrect numeric results; these now raise ``TypeError`` (:issue:`34671`) - Conversion diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index ddec07c8bf890..8fddc8461dfbe 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -716,7 +716,8 @@ def nanmean( dtype_count = dtype count = _get_counts(values.shape, mask, axis, dtype=dtype_count) - the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) + the_sum = values.sum(axis, dtype=dtype_sum) + the_sum = _ensure_numeric(the_sum) if axis is not None and getattr(the_sum, "ndim", False): count = cast(np.ndarray, count) @@ -775,6 +776,11 @@ def get_median(x, _mask=None): dtype = values.dtype values, mask = _get_values(values, skipna, mask=mask, fill_value=0) if values.dtype.kind != "f": + if values.dtype == object: + # GH#34671 avoid casting strings to numeric + inferred = lib.infer_dtype(values) + if inferred in ["string", "mixed"]: + raise TypeError(f"Cannot convert {values} to numeric") try: values = values.astype("f8") except 
ValueError as err: @@ -1659,6 +1665,10 @@ def _ensure_numeric(x): if x.dtype.kind in "biu": x = x.astype(np.float64) elif x.dtype == object: + inferred = lib.infer_dtype(x) + if inferred in ["string", "mixed"]: + # GH#44008, GH#36703 avoid casting e.g. strings to numeric + raise TypeError(f"Could not convert {x} to numeric") try: x = x.astype(np.complex128) except (TypeError, ValueError): @@ -1671,6 +1681,9 @@ def _ensure_numeric(x): if not np.any(np.imag(x)): x = x.real elif not (is_float(x) or is_integer(x) or is_complex(x)): + if isinstance(x, str): + # GH#44008, GH#36703 avoid casting e.g. strings to numeric + raise TypeError(f"Could not convert string '{x}' to numeric") try: x = float(x) except (TypeError, ValueError): diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py index 5995b78d4bea5..d75b784302676 100644 --- a/pandas/tests/apply/test_invalid_arg.py +++ b/pandas/tests/apply/test_invalid_arg.py @@ -244,6 +244,9 @@ def test_agg_cython_table_raises_frame(df, func, expected, axis): def test_agg_cython_table_raises_series(series, func, expected): # GH21224 msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type" + if func == "median" or func is np.nanmedian or func is np.median: + msg = r"Cannot convert \['a' 'b' 'c'\] to numeric" + with pytest.raises(expected, match=msg): # e.g. 
Series('a b'.split()).cumprod() will raise series.agg(func) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 0d352b8e34f37..096f6fe83ea88 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -169,15 +169,30 @@ def test_stat_op_api_float_string_frame(self, float_string_frame, axis, opname): ): getattr(float_string_frame, opname)(axis=axis) else: - msg = "|".join( - [ - "Could not convert", - "could not convert", - "can't multiply sequence by non-int", - "unsupported operand type", - "not supported between instances of", - ] - ) + if opname in ["var", "std", "sem", "skew", "kurt"]: + msg = "could not convert string to float: 'bar'" + elif opname == "product": + if axis == 1: + msg = "can't multiply sequence by non-int of type 'float'" + else: + msg = "can't multiply sequence by non-int of type 'str'" + elif opname == "sum": + msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'" + elif opname == "mean": + if axis == 0: + # different message on different builds + msg = "|".join( + [ + r"Could not convert \['.*'\] to numeric", + "Could not convert string '(bar){30}' to numeric", + ] + ) + else: + msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'" + elif opname in ["min", "max"]: + msg = "'[><]=' not supported between instances of 'float' and 'str'" + elif opname == "median": + msg = re.compile(r"Cannot convert \[.*\] to numeric", flags=re.S) with pytest.raises(TypeError, match=msg): getattr(float_string_frame, opname)(axis=axis) if opname != "nunique": @@ -1759,5 +1774,16 @@ def test_fails_on_non_numeric(kernel): "argument must be a string or a real number", ] ) + if kernel == "median": + # slightly different message on different builds + msg1 = ( + r"Cannot convert \[\[<class 'object'> <class 'object'> " + r"<class 'object'>\]\] to numeric" + ) + msg2 = ( + r"Cannot convert \[<class 'object'> <class 'object'> " + r"<class 'object'>\] to numeric" + ) + 
msg = "|".join([msg1, msg2]) with pytest.raises(TypeError, match=msg): getattr(df, kernel)(*args) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index ac192f190962d..838bfc6a76497 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -262,6 +262,8 @@ def _check(self, df, method, expected_columns, expected_columns_numeric): "can't multiply sequence by non-int of type 'str'", ] ) + if method == "median": + msg = r"Cannot convert \['a' 'b'\] to numeric" with pytest.raises(exception, match=msg): getattr(gb, method)() else: @@ -279,6 +281,8 @@ def _check(self, df, method, expected_columns, expected_columns_numeric): f"Cannot perform {method} with non-ordered Categorical", ] ) + if method == "median": + msg = r"Cannot convert \['a' 'b'\] to numeric" with pytest.raises(exception, match=msg): getattr(gb, method)(numeric_only=False) else: @@ -1467,6 +1471,8 @@ def test_numeric_only(kernel, has_arg, numeric_only, keys): "function is not implemented for this dtype", ] ) + if kernel == "median": + msg = r"Cannot convert \[<class 'object'> <class 'object'>\] to numeric" with pytest.raises(exception, match=msg): method(*args, **kwargs) elif not has_arg and numeric_only is not lib.no_default: diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index b5b13d6b10511..5fd1d84219167 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -654,7 +654,8 @@ def test_frame_multi_key_function_list_partial_failure(): grouped = data.groupby(["A", "B"]) funcs = [np.mean, np.std] - with pytest.raises(TypeError, match="Could not convert dullshinyshiny to numeric"): + msg = "Could not convert string 'dullshinyshiny' to numeric" + with pytest.raises(TypeError, match=msg): grouped.agg(funcs) @@ -973,6 +974,8 @@ def test_omit_nuisance_agg(df, agg_function, numeric_only): # columns when numeric_only is False klass = ValueError if 
agg_function in ("std", "sem") else TypeError msg = "|".join(["[C|c]ould not convert", "can't multiply sequence"]) + if agg_function == "median": + msg = r"Cannot convert \['one' 'three' 'two'\] to numeric" with pytest.raises(klass, match=msg): getattr(grouped, agg_function)(numeric_only=numeric_only) else: diff --git a/pandas/tests/groupby/test_raises.py b/pandas/tests/groupby/test_raises.py index 9b3c7543def68..55a6bc37d6046 100644 --- a/pandas/tests/groupby/test_raises.py +++ b/pandas/tests/groupby/test_raises.py @@ -147,8 +147,21 @@ def test_groupby_raises_string( "idxmin": (TypeError, "'argmin' not allowed for this dtype"), "last": (None, ""), "max": (None, ""), - "mean": (TypeError, "Could not convert xy?z?w?t?y?u?i?o? to numeric"), - "median": (TypeError, "could not convert string to float"), + "mean": ( + TypeError, + "Could not convert string '(xy|xyzwt|xyz|xztuo)' to numeric", + ), + "median": ( + TypeError, + "|".join( + [ + r"Cannot convert \['x' 'y' 'z'\] to numeric", + r"Cannot convert \['x' 'y'\] to numeric", + r"Cannot convert \['x' 'y' 'z' 'w' 't'\] to numeric", + r"Cannot convert \['x' 'z' 't' 'u' 'o'\] to numeric", + ] + ), + ), "min": (None, ""), "ngroup": (None, ""), "nunique": (None, ""), @@ -197,7 +210,10 @@ def test_groupby_raises_string_np( klass, msg = { np.sum: (None, ""), - np.mean: (TypeError, "Could not convert xy?z?w?t?y?u?i?o? 
to numeric"), + np.mean: ( + TypeError, + "Could not convert string '(xyzwt|xy|xyz|xztuo)' to numeric", + ), }[groupby_func_np] _call_and_check(klass, msg, how, gb, groupby_func_np, tuple()) diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index 4b86a25f9587d..6a22faa623f69 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -857,8 +857,8 @@ def test_end_and_end_day_origin( ("mean", False, "Could not convert"), ("mean", lib.no_default, "Could not convert"), ("median", True, {"num": [12.5]}), - ("median", False, "could not convert"), - ("median", lib.no_default, "could not convert"), + ("median", False, r"Cannot convert \['cat_1' 'cat_2'\] to numeric"), + ("median", lib.no_default, r"Cannot convert \['cat_1' 'cat_2'\] to numeric"), ("std", True, {"num": [10.606601717798213]}), ("std", False, "could not convert string to float"), ("std", lib.no_default, "could not convert string to float"), diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py index eb11b62a651cc..0152303a7269a 100644 --- a/pandas/tests/series/test_reductions.py +++ b/pandas/tests/series/test_reductions.py @@ -129,3 +129,52 @@ def test_validate_stat_keepdims(): ) with pytest.raises(ValueError, match=msg): np.sum(ser, keepdims=True) + + +def test_mean_with_convertible_string_raises(using_array_manager): + # GH#44008 + ser = Series(["1", "2"]) + assert ser.sum() == "12" + msg = "Could not convert string '12' to numeric" + with pytest.raises(TypeError, match=msg): + ser.mean() + + df = ser.to_frame() + if not using_array_manager: + msg = r"Could not convert \['12'\] to numeric" + with pytest.raises(TypeError, match=msg): + df.mean() + + +def test_mean_dont_convert_j_to_complex(using_array_manager): + # GH#36703 + df = pd.DataFrame([{"db": "J", "numeric": 123}]) + if using_array_manager: + msg = "Could not convert string 'J' to numeric" + else: + msg = r"Could 
not convert \['J'\] to numeric" + with pytest.raises(TypeError, match=msg): + df.mean() + + with pytest.raises(TypeError, match=msg): + df.agg("mean") + + msg = "Could not convert string 'J' to numeric" + with pytest.raises(TypeError, match=msg): + df["db"].mean() + with pytest.raises(TypeError, match=msg): + np.mean(df["db"].astype("string").array) + + +def test_median_with_convertible_string_raises(using_array_manager): + # GH#34671 this _could_ return a string "2", but definitely not float 2.0 + msg = r"Cannot convert \['1' '2' '3'\] to numeric" + ser = Series(["1", "2", "3"]) + with pytest.raises(TypeError, match=msg): + ser.median() + + if not using_array_manager: + msg = r"Cannot convert \[\['1' '2' '3'\]\] to numeric" + df = ser.to_frame() + with pytest.raises(TypeError, match=msg): + df.median() diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 971535bd7d783..7d258033748b6 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -850,7 +850,9 @@ def test_ndarray(self): # Test convertible string ndarray s_values = np.array(["1", "2", "3"], dtype=object) - assert np.allclose(nanops._ensure_numeric(s_values), values) + msg = r"Could not convert \['1' '2' '3'\] to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) # Test non-convertible string ndarray s_values = np.array(["foo", "bar", "baz"], dtype=object) @@ -859,12 +861,19 @@ def test_ndarray(self): nanops._ensure_numeric(s_values) def test_convertable_values(self): - assert np.allclose(nanops._ensure_numeric("1"), 1.0) - assert np.allclose(nanops._ensure_numeric("1.1"), 1.1) - assert np.allclose(nanops._ensure_numeric("1+1j"), 1 + 1j) + with pytest.raises(TypeError, match="Could not convert string '1' to numeric"): + nanops._ensure_numeric("1") + with pytest.raises( + TypeError, match="Could not convert string '1.1' to numeric" + ): + nanops._ensure_numeric("1.1") + with pytest.raises( + TypeError, match=r"Could not convert 
string '1\+1j' to numeric" + ): + nanops._ensure_numeric("1+1j") def test_non_convertable_values(self): - msg = "Could not convert foo to numeric" + msg = "Could not convert string 'foo' to numeric" with pytest.raises(TypeError, match=msg): nanops._ensure_numeric("foo")
- [x] closes #36703 (Replace xxxx with the GitHub issue number) - [x] closes #44008 - [x] closes #34671 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. I'm not wild about this multi-pass implementation. Longer-term I think we need to re-write a lot of nanops to be single-pass (zero|few)-copy.
https://api.github.com/repos/pandas-dev/pandas/pulls/52281
2023-03-29T16:02:08Z
2023-04-24T18:49:48Z
2023-04-24T18:49:48Z
2023-04-30T17:43:09Z
Doc: add ExtensionArray.map to reference/extensions.rst
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 45df480779ee7..2d307859eb7a1 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -524,6 +524,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then pandas.api.extensions.ExtensionArray.insert \ pandas.api.extensions.ExtensionArray.isin \ pandas.api.extensions.ExtensionArray.isna \ + pandas.api.extensions.ExtensionArray.map \ pandas.api.extensions.ExtensionArray.ravel \ pandas.api.extensions.ExtensionArray.searchsorted \ pandas.api.extensions.ExtensionArray.shift \ diff --git a/doc/redirects.csv b/doc/redirects.csv index 97cd20b295e65..1792008799d45 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -79,6 +79,7 @@ generated/pandas.api.extensions.ExtensionArray.factorize,../reference/api/pandas generated/pandas.api.extensions.ExtensionArray.fillna,../reference/api/pandas.api.extensions.ExtensionArray.fillna generated/pandas.api.extensions.ExtensionArray,../reference/api/pandas.api.extensions.ExtensionArray generated/pandas.api.extensions.ExtensionArray.isna,../reference/api/pandas.api.extensions.ExtensionArray.isna +generated/pandas.api.extensions.ExtensionArray.map,../reference/api/pandas.api.extensions.ExtensionArray.map generated/pandas.api.extensions.ExtensionArray.nbytes,../reference/api/pandas.api.extensions.ExtensionArray.nbytes generated/pandas.api.extensions.ExtensionArray.ndim,../reference/api/pandas.api.extensions.ExtensionArray.ndim generated/pandas.api.extensions.ExtensionArray.shape,../reference/api/pandas.api.extensions.ExtensionArray.shape diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index b33efd388bd60..f25f0f5bc21fa 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -53,6 +53,7 @@ objects. 
api.extensions.ExtensionArray.insert api.extensions.ExtensionArray.isin api.extensions.ExtensionArray.isna + api.extensions.ExtensionArray.map api.extensions.ExtensionArray.ravel api.extensions.ExtensionArray.repeat api.extensions.ExtensionArray.searchsorted
xref #52247 & #52263.
https://api.github.com/repos/pandas-dev/pandas/pulls/52280
2023-03-29T15:47:03Z
2023-05-15T20:50:09Z
null
2023-05-15T20:50:10Z
PERF: dtype checks
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 2c0d75bcf2250..83ac5651c1d1c 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import ( + TYPE_CHECKING, Literal, cast, ) @@ -21,6 +22,7 @@ ) from pandas.core.dtypes.dtypes import ( CategoricalDtype, + ExtensionDtype, PandasDtype, ) from pandas.core.dtypes.missing import array_equivalent @@ -53,6 +55,9 @@ from pandas.io.formats.printing import pprint_thing +if TYPE_CHECKING: + from pandas._typing import DtypeObj + def assert_almost_equal( left, @@ -965,7 +970,9 @@ def assert_series_equal( obj=str(obj), index_values=np.asarray(left.index), ) - elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype): + elif isinstance(left.dtype, ExtensionDtype) and isinstance( + right.dtype, ExtensionDtype + ): assert_extension_array_equal( left._values, right._values, @@ -1320,7 +1327,9 @@ def assert_copy(iter1, iter2, **eql_kwargs) -> None: assert elem1 is not elem2, msg -def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool: +def is_extension_array_dtype_and_needs_i8_conversion( + left_dtype: DtypeObj, right_dtype: DtypeObj +) -> bool: """ Checks that we have the combination of an ExtensionArraydtype and a dtype that should be converted to int64 @@ -1331,7 +1340,7 @@ def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> Related to issue #37609 """ - return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype) + return isinstance(left_dtype, ExtensionDtype) and needs_i8_conversion(right_dtype) def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None: diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 3d3a7fa6f0f33..9b1ef4ad8a41b 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -40,10 +40,10 @@ from pandas.core.dtypes.cast import is_nested_object from 
pandas.core.dtypes.common import ( is_dict_like, - is_extension_array_dtype, is_list_like, is_sequence, ) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCNDFrame, @@ -940,7 +940,7 @@ def series_generator(self): ser = self.obj._ixs(0, axis=0) mgr = ser._mgr - if is_extension_array_dtype(ser.dtype): + if isinstance(ser.dtype, ExtensionDtype): # values will be incorrect for this block # TODO(EA2D): special case would be unnecessary with 2D EAs obj = self.obj diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f8befdbc6ca9c..673058c30664b 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -437,7 +437,7 @@ def __init__( # we're inferring from values dtype = CategoricalDtype(categories, dtype.ordered) - elif is_categorical_dtype(values.dtype): + elif isinstance(values.dtype, CategoricalDtype): old_codes = extract_array(values)._codes codes = recode_for_categories( old_codes, values.dtype.categories, dtype.categories, copy=copy @@ -504,9 +504,7 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: if self.dtype is dtype: result = self.copy() if copy else self - elif is_categorical_dtype(dtype): - dtype = cast(CategoricalDtype, dtype) - + elif isinstance(dtype, CategoricalDtype): # GH 10696/18593/18630 dtype = self.dtype.update_dtype(dtype) self = self.copy() if copy else self @@ -2497,7 +2495,7 @@ def __init__(self, data) -> None: @staticmethod def _validate(data): - if not is_categorical_dtype(data.dtype): + if not isinstance(data.dtype, CategoricalDtype): raise AttributeError("Can only use .cat accessor with a 'category' dtype") # error: Signature of "_delegate_property_get" incompatible with supertype diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 334400cc13201..65c4565171410 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -91,7 
+91,6 @@ is_integer_dtype, is_list_like, is_object_dtype, - is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype, @@ -1405,7 +1404,7 @@ def __sub__(self, other): ): # DatetimeIndex, ndarray[datetime64] result = self._sub_datetime_arraylike(other) - elif is_period_dtype(other_dtype): + elif isinstance(other_dtype, PeriodDtype): # PeriodIndex result = self._sub_periodlike(other) elif is_integer_dtype(other_dtype): diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 3e32598cc6b11..6b0cf2a900ae5 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -12,7 +12,6 @@ Literal, Sequence, Union, - cast, overload, ) @@ -55,7 +54,6 @@ is_dtype_equal, is_float_dtype, is_integer_dtype, - is_interval_dtype, is_list_like, is_object_dtype, is_scalar, @@ -63,7 +61,10 @@ needs_i8_conversion, pandas_dtype, ) -from pandas.core.dtypes.dtypes import IntervalDtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, @@ -317,8 +318,7 @@ def _ensure_simple_new_inputs( if dtype is not None: # GH 19262: dtype must be an IntervalDtype to override inferred dtype = pandas_dtype(dtype) - if is_interval_dtype(dtype): - dtype = cast(IntervalDtype, dtype) + if isinstance(dtype, IntervalDtype): if dtype.subtype is not None: left = left.astype(dtype.subtype) right = right.astype(dtype.subtype) @@ -344,7 +344,7 @@ def _ensure_simple_new_inputs( f"right [{type(right).__name__}] types" ) raise ValueError(msg) - if is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): + if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype): # GH 19016 msg = ( "category, object, and string subtypes are not supported " @@ -752,14 +752,14 @@ def _cmp_method(self, other, op): # determine the dtype of the elements we want to compare if isinstance(other, Interval): other_dtype = pandas_dtype("interval") - elif not 
is_categorical_dtype(other.dtype): + elif not isinstance(other.dtype, CategoricalDtype): other_dtype = other.dtype else: # for categorical defer to categories for dtype other_dtype = other.categories.dtype # extract intervals if we have interval categories with matching closed - if is_interval_dtype(other_dtype): + if isinstance(other_dtype, IntervalDtype): if self.closed != other.categories.closed: return invalid_comparison(self, other, op) @@ -768,7 +768,7 @@ def _cmp_method(self, other, op): ) # interval-like -> need same closed and matching endpoints - if is_interval_dtype(other_dtype): + if isinstance(other_dtype, IntervalDtype): if self.closed != other.closed: return invalid_comparison(self, other, op) elif not isinstance(other, Interval): @@ -951,7 +951,7 @@ def astype(self, dtype, copy: bool = True): if dtype is not None: dtype = pandas_dtype(dtype) - if is_interval_dtype(dtype): + if isinstance(dtype, IntervalDtype): if dtype == self.dtype: return self.copy() if copy else self @@ -1683,7 +1683,7 @@ def isin(self, values) -> npt.NDArray[np.bool_]: values = np.array(values) values = extract_array(values, extract_numpy=True) - if is_interval_dtype(values.dtype): + if isinstance(values.dtype, IntervalDtype): if self.closed != values.closed: # not comparable -> no overlap return np.zeros(self.shape, dtype=bool) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 6557a4b674b4f..83d50f2d0832a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -635,7 +635,7 @@ def astype(self, dtype, copy: bool = True): return self else: return self.copy() - if is_period_dtype(dtype): + if isinstance(dtype, PeriodDtype): return self.asfreq(dtype.freq) if is_datetime64_any_dtype(dtype): @@ -897,7 +897,7 @@ def period_array( if is_datetime64_dtype(data_dtype): return PeriodArray._from_datetime64(data, freq) - if is_period_dtype(data_dtype): + if isinstance(data_dtype, PeriodDtype): return PeriodArray(data, freq=freq) # other 
iterable of some kind @@ -966,7 +966,7 @@ def validate_dtype_freq( if dtype is not None: dtype = pandas_dtype(dtype) - if not is_period_dtype(dtype): + if not isinstance(dtype, PeriodDtype): raise ValueError("dtype must be PeriodDtype") if freq is None: freq = dtype.freq diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 3365ea0f9db7e..f55fde9c75e4b 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -45,7 +45,6 @@ is_array_like, is_bool_dtype, is_datetime64_any_dtype, - is_datetime64tz_dtype, is_dtype_equal, is_integer, is_list_like, @@ -54,6 +53,7 @@ is_string_dtype, pandas_dtype, ) +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, @@ -458,7 +458,7 @@ def __init__( data = extract_array(data, extract_numpy=True) if not isinstance(data, np.ndarray): # EA - if is_datetime64tz_dtype(data.dtype): + if isinstance(data.dtype, DatetimeTZDtype): warnings.warn( f"Creating SparseArray from {data.dtype} data " "loses timezone information. 
Cast to object before " diff --git a/pandas/core/base.py b/pandas/core/base.py index d085807981fa8..0a46b8d9c6e3a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -42,10 +42,10 @@ from pandas.core.dtypes.cast import can_hold_element from pandas.core.dtypes.common import ( - is_extension_array_dtype, is_object_dtype, is_scalar, ) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, @@ -565,7 +565,7 @@ def to_numpy( array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], dtype='datetime64[ns]') """ - if is_extension_array_dtype(self.dtype): + if isinstance(self.dtype, ExtensionDtype): return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs) elif kwargs: bad_keys = list(kwargs.keys())[0] @@ -1132,7 +1132,7 @@ def _memory_usage(self, deep: bool = False) -> int: ) v = self.array.nbytes - if deep and is_object_dtype(self) and not PYPY: + if deep and is_object_dtype(self.dtype) and not PYPY: values = cast(np.ndarray, self._values) v += lib.memory_usage_of_objects(values) return v diff --git a/pandas/core/common.py b/pandas/core/common.py index 8f3b3bff5641a..6b7a0214925df 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -35,9 +35,9 @@ from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, - is_extension_array_dtype, is_integer, ) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, @@ -122,7 +122,7 @@ def is_bool_indexer(key: Any) -> bool: and convert to an ndarray. 
""" if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or ( - is_array_like(key) and is_extension_array_dtype(key.dtype) + is_array_like(key) and isinstance(key.dtype, ExtensionDtype) ): if key.dtype == np.object_: key_array = np.asarray(key) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 72fd7fadd0987..a0c74399baee9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -110,7 +110,6 @@ is_bool, is_bool_dtype, is_datetime64_any_dtype, - is_datetime64tz_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, @@ -123,6 +122,7 @@ is_timedelta64_dtype, pandas_dtype, ) +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, @@ -9623,7 +9623,7 @@ def align( if self.ndim == 1 or axis == 0: # If we are aligning timezone-aware DatetimeIndexes and the timezones # do not match, convert both to UTC. - if is_datetime64tz_dtype(left.index.dtype): + if isinstance(left.index.dtype, DatetimeTZDtype): if left.index.tz != right.index.tz: if join_index is not None: # GH#33671 copy to ensure we don't change the index on diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index e87a74e5885b3..79c1d8004c20b 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -43,13 +43,15 @@ from pandas.core.dtypes.common import ( ensure_int64, is_bool, - is_categorical_dtype, is_dict_like, is_integer_dtype, - is_interval_dtype, is_numeric_dtype, is_scalar, ) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) from pandas.core.dtypes.missing import ( isna, notna, @@ -681,7 +683,7 @@ def value_counts( index_names = self.grouper.names + [self.obj.name] - if is_categorical_dtype(val.dtype) or ( + if isinstance(val.dtype, CategoricalDtype) or ( bins is not None and not np.iterable(bins) ): # scalar bins cannot be done at top level @@ -717,7 +719,7 @@ def value_counts( ) llab = lambda lab, inc: 
lab[inc]._multiindex.codes[-1] - if is_interval_dtype(lab.dtype): + if isinstance(lab.dtype, IntervalDtype): # TODO: should we do this inside II? lab_interval = cast(Interval, lab) diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 9f5dd5bec41ef..5849d4cf74ca8 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -11,14 +11,16 @@ import numpy as np from pandas.core.dtypes.common import ( - is_categorical_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, is_integer_dtype, is_list_like, - is_period_dtype, is_timedelta64_dtype, ) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + PeriodDtype, +) from pandas.core.dtypes.generic import ABCSeries from pandas.core.accessor import ( @@ -68,13 +70,13 @@ def _get_values(self): if is_datetime64_dtype(data.dtype): return DatetimeIndex(data, copy=False, name=self.name) - elif is_datetime64tz_dtype(data.dtype): + elif isinstance(data.dtype, DatetimeTZDtype): return DatetimeIndex(data, copy=False, name=self.name) elif is_timedelta64_dtype(data.dtype): return TimedeltaIndex(data, copy=False, name=self.name) - elif is_period_dtype(data.dtype): + elif isinstance(data.dtype, PeriodDtype): return PeriodArray(data, copy=False) raise TypeError( @@ -558,7 +560,7 @@ def __new__(cls, data: Series): f"cannot convert an object of type {type(data)} to a datetimelike index" ) - orig = data if is_categorical_dtype(data.dtype) else None + orig = data if isinstance(data.dtype, CategoricalDtype) else None if orig is not None: data = data._constructor( orig.array, @@ -572,11 +574,11 @@ def __new__(cls, data: Series): return ArrowTemporalProperties(data, orig) if is_datetime64_dtype(data.dtype): return DatetimeProperties(data, orig) - elif is_datetime64tz_dtype(data.dtype): + elif isinstance(data.dtype, DatetimeTZDtype): return DatetimeProperties(data, orig) elif is_timedelta64_dtype(data.dtype): return TimedeltaProperties(data, orig) - elif 
is_period_dtype(data.dtype): + elif isinstance(data.dtype, PeriodDtype): return PeriodProperties(data, orig) raise AttributeError("Can only use .dt accessor with datetimelike values") diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index b740f58097509..89b1777360bac 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -4,6 +4,7 @@ TYPE_CHECKING, Any, Hashable, + cast, ) import numpy as np @@ -14,10 +15,8 @@ doc, ) -from pandas.core.dtypes.common import ( - is_categorical_dtype, - is_scalar, -) +from pandas.core.dtypes.common import is_scalar +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, @@ -226,7 +225,7 @@ def __new__( # -------------------------------------------------------------------- - def _is_dtype_compat(self, other) -> Categorical: + def _is_dtype_compat(self, other: Index) -> Categorical: """ *this is an internal non-public method* @@ -245,9 +244,10 @@ def _is_dtype_compat(self, other) -> Categorical: ------ TypeError if the dtypes are not compatible """ - if is_categorical_dtype(other): - other = extract_array(other) - if not other._categories_match_up_to_permutation(self): + if isinstance(other.dtype, CategoricalDtype): + cat = extract_array(other) + cat = cast(Categorical, cat) + if not cat._categories_match_up_to_permutation(self._values): raise TypeError( "categories must match existing categories when appending" ) @@ -264,15 +264,15 @@ def _is_dtype_compat(self, other) -> Categorical: raise TypeError( "cannot append a non-category item to a CategoricalIndex" ) - other = other._values + cat = other._values - if not ((other == values) | (isna(other) & isna(values))).all(): + if not ((cat == values) | (isna(cat) & isna(values))).all(): # GH#37667 see test_equals_non_category raise TypeError( "categories must match existing categories when appending" ) - return other + return cat def equals(self, other: object) 
-> bool: """ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 1133ea6be26ac..e4e7f3a4d186d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -39,12 +39,12 @@ ) from pandas.core.dtypes.common import ( - is_categorical_dtype, is_dtype_equal, is_integer, is_list_like, ) from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.arrays import ( DatetimeArray, @@ -139,7 +139,7 @@ def equals(self, other: Any) -> bool: inferable = self._data._infer_matches if other.dtype == object: should_try = other.inferred_type in inferable - elif is_categorical_dtype(other.dtype): + elif isinstance(other.dtype, CategoricalDtype): other = cast("CategoricalIndex", other) should_try = other.categories.inferred_type in inferable diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9690806afb173..73b0086dd0934 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -32,9 +32,9 @@ from pandas.core.dtypes.common import ( is_datetime64_dtype, - is_datetime64tz_dtype, is_scalar, ) +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import is_valid_na_for_dtype @@ -382,7 +382,7 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ if self.tz is not None: # If we have tz, we can compare to tzaware - return is_datetime64tz_dtype(dtype) + return isinstance(dtype, DatetimeTZDtype) # if we dont have tz, we can only compare to tznaive return is_datetime64_dtype(dtype) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 1740c5c368a94..a95579ea4fd37 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -50,7 +50,6 @@ is_float_dtype, is_integer, is_integer_dtype, - is_interval_dtype, is_list_like, is_number, is_object_dtype, @@ 
-508,7 +507,8 @@ def _needs_i8_conversion(self, key) -> bool: ------- bool """ - if is_interval_dtype(key) or isinstance(key, Interval): + key_dtype = getattr(key, "dtype", None) + if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval): return self._needs_i8_conversion(key.left) i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex) @@ -539,7 +539,8 @@ def _maybe_convert_i8(self, key): return key scalar = is_scalar(key) - if is_interval_dtype(key) or isinstance(key, Interval): + key_dtype = getattr(key, "dtype", None) + if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval): # convert left/right and reconstruct left = self._maybe_convert_i8(key.left) right = self._maybe_convert_i8(key.right) diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py index 2080ed156ed62..3e626fd6cbf57 100644 --- a/pandas/core/internals/api.py +++ b/pandas/core/internals/api.py @@ -14,10 +14,10 @@ from pandas._libs.internals import BlockPlacement -from pandas.core.dtypes.common import ( - is_datetime64tz_dtype, - is_period_dtype, - pandas_dtype, +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + PeriodDtype, ) from pandas.core.arrays import DatetimeArray @@ -56,7 +56,7 @@ def make_block( values, dtype = extract_pandas_array(values, dtype, ndim) - if klass is ExtensionBlock and is_period_dtype(values.dtype): + if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype): # GH-44681 changed PeriodArray to be stored in the 2D # NDArrayBackedExtensionBlock instead of ExtensionBlock # -> still allow ExtensionBlock to be passed in this case for back compat @@ -66,7 +66,7 @@ def make_block( dtype = dtype or values.dtype klass = get_block_type(dtype) - elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype): + elif klass is DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype): # pyarrow calls get here values = 
DatetimeArray._simple_new(values, dtype=dtype) @@ -74,7 +74,7 @@ def make_block( placement = BlockPlacement(placement) ndim = maybe_infer_ndim(values, placement, ndim) - if is_datetime64tz_dtype(values.dtype) or is_period_dtype(values.dtype): + if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)): # GH#41168 ensure we can pass 1D dt64tz values # More generally, any EA dtype that isn't is_1d_only_ea_dtype values = extract_array(values, extract_numpy=True) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index c3dacc2172aa7..14cf5f317ed5a 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -55,7 +55,6 @@ is_array_like, is_bool, is_bool_dtype, - is_categorical_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, @@ -67,7 +66,10 @@ is_object_dtype, needs_i8_conversion, ) -from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, +) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, @@ -1277,8 +1279,8 @@ def _maybe_coerce_merge_keys(self) -> None: lk = extract_array(lk, extract_numpy=True) rk = extract_array(rk, extract_numpy=True) - lk_is_cat = is_categorical_dtype(lk.dtype) - rk_is_cat = is_categorical_dtype(rk.dtype) + lk_is_cat = isinstance(lk.dtype, CategoricalDtype) + rk_is_cat = isinstance(rk.dtype, CategoricalDtype) lk_is_object = is_object_dtype(lk.dtype) rk_is_object = is_object_dtype(rk.dtype) @@ -1978,7 +1980,9 @@ def _get_merge_keys( # validate index types are the same for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): if not is_dtype_equal(lk.dtype, rk.dtype): - if is_categorical_dtype(lk.dtype) and is_categorical_dtype(rk.dtype): + if isinstance(lk.dtype, CategoricalDtype) and isinstance( + rk.dtype, CategoricalDtype + ): # The generic error message is confusing for categoricals. 
# # In this function, the join keys include both the original @@ -2359,8 +2363,8 @@ def _factorize_keys( rk = cast("DatetimeArray", rk)._ndarray elif ( - is_categorical_dtype(lk.dtype) - and is_categorical_dtype(rk.dtype) + isinstance(lk.dtype, CategoricalDtype) + and isinstance(rk.dtype, CategoricalDtype) and is_dtype_equal(lk.dtype, rk.dtype) ): assert isinstance(lk, Categorical) diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 85c5b089b3582..e0fae20dfb645 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -27,12 +27,12 @@ from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, - is_categorical_dtype, is_integer, is_list_like, is_object_dtype, is_re, ) +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, @@ -178,7 +178,7 @@ def __init__(self, data) -> None: from pandas.core.arrays.string_ import StringDtype self._inferred_dtype = self._validate(data) - self._is_categorical = is_categorical_dtype(data.dtype) + self._is_categorical = isinstance(data.dtype, CategoricalDtype) self._is_string = isinstance(data.dtype, StringDtype) self._data = data @@ -628,7 +628,7 @@ def cat( out = Index(result, dtype=object, name=self._orig.name) else: # Series - if is_categorical_dtype(self._orig.dtype): + if isinstance(self._orig.dtype, CategoricalDtype): # We need to infer the new categories. 
dtype = None else: diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 9e459a1b218dc..39254df7639b8 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -52,7 +52,6 @@ from pandas.core.dtypes.common import ( ensure_object, is_datetime64_dtype, - is_datetime64tz_dtype, is_float, is_integer, is_integer_dtype, @@ -60,6 +59,7 @@ is_numeric_dtype, is_scalar, ) +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, @@ -395,7 +395,7 @@ def _convert_listlike_datetimes( arg_dtype = getattr(arg, "dtype", None) # these are shortcutable tz = "utc" if utc else None - if is_datetime64tz_dtype(arg_dtype): + if isinstance(arg_dtype, DatetimeTZDtype): if not isinstance(arg, (DatetimeArray, DatetimeIndex)): return DatetimeIndex(arg, tz=tz, name=name) if utc: diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 944642bbfe8d3..ef8c7550476e3 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -32,10 +32,8 @@ from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend -from pandas.core.dtypes.common import ( - ensure_str, - is_period_dtype, -) +from pandas.core.dtypes.common import ensure_str +from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ABCIndex from pandas import ( @@ -369,7 +367,7 @@ def __init__( if len(timedeltas): obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat()) # Convert PeriodIndex to datetimes before serializing - if is_period_dtype(obj.index.dtype): + if isinstance(obj.index.dtype, PeriodDtype): obj.index = obj.index.to_timestamp() # exclude index from obj if index=False diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 35ea4dc911fa8..eec180d053114 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -19,17 +19,18 @@ from pandas.core.dtypes.base import 
_registry as registry from pandas.core.dtypes.common import ( is_bool_dtype, - is_categorical_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, - is_extension_array_dtype, is_integer_dtype, is_numeric_dtype, - is_period_dtype, is_string_dtype, is_timedelta64_dtype, ) -from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) from pandas import DataFrame import pandas.core.common as com @@ -83,11 +84,11 @@ def as_json_table_type(x: DtypeObj) -> str: return "boolean" elif is_numeric_dtype(x): return "number" - elif is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or is_period_dtype(x): + elif is_datetime64_dtype(x) or isinstance(x, (DatetimeTZDtype, PeriodDtype)): return "datetime" elif is_timedelta64_dtype(x): return "duration" - elif is_extension_array_dtype(x): + elif isinstance(x, ExtensionDtype): return "any" elif is_string_dtype(x): return "string" @@ -131,21 +132,22 @@ def convert_pandas_type_to_json_field(arr) -> dict[str, JSONSerializable]: "type": as_json_table_type(dtype), } - if is_categorical_dtype(dtype): + if isinstance(dtype, CategoricalDtype): cats = dtype.categories ordered = dtype.ordered field["constraints"] = {"enum": list(cats)} field["ordered"] = ordered - elif is_period_dtype(dtype): + elif isinstance(dtype, PeriodDtype): field["freq"] = dtype.freq.freqstr - elif is_datetime64tz_dtype(dtype): + elif isinstance(dtype, DatetimeTZDtype): if timezones.is_utc(dtype.tz): # timezone.utc has no "zone" attr field["tz"] = "UTC" else: - field["tz"] = dtype.tz.zone - elif is_extension_array_dtype(dtype): + # error: "tzinfo" has no attribute "zone" + field["tz"] = dtype.tz.zone # type: ignore[attr-defined] + elif isinstance(dtype, ExtensionDtype): field["extDtype"] = dtype.name return field diff --git a/pandas/io/orc.py b/pandas/io/orc.py index 10530a34ee218..410a11b8ca01c 100644 --- a/pandas/io/orc.py +++ b/pandas/io/orc.py @@ -14,11 
+14,11 @@ from pandas.compat._optional import import_optional_dependency from pandas.util._validators import check_dtype_backend -from pandas.core.dtypes.common import ( - is_categorical_dtype, - is_interval_dtype, - is_period_dtype, - is_unsigned_integer_dtype, +from pandas.core.dtypes.common import is_unsigned_integer_dtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, + PeriodDtype, ) import pandas as pd @@ -213,12 +213,9 @@ def to_orc( # In Pyarrow 8.0.0 this check will no longer be needed if pa_version_under8p0: for dtype in df.dtypes: - if ( - is_categorical_dtype(dtype) - or is_interval_dtype(dtype) - or is_period_dtype(dtype) - or is_unsigned_integer_dtype(dtype) - ): + if isinstance( + dtype, (IntervalDtype, CategoricalDtype, PeriodDtype) + ) or is_unsigned_integer_dtype(dtype): raise NotImplementedError( "The dtype of one or more columns is not supported yet." ) diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index 3d3e343050421..a6647df947961 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -19,14 +19,12 @@ from pandas.errors import DtypeWarning from pandas.util._exceptions import find_stack_level -from pandas.core.dtypes.common import ( - is_categorical_dtype, - pandas_dtype, -) +from pandas.core.dtypes.common import pandas_dtype from pandas.core.dtypes.concat import ( concat_compat, union_categoricals, ) +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.indexes.api import ensure_index_from_sequences @@ -381,10 +379,10 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: arrs = [chunk.pop(name) for chunk in chunks] # Check each arr for consistent types. 
dtypes = {a.dtype for a in arrs} - non_cat_dtypes = {x for x in dtypes if not is_categorical_dtype(x)} + non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)} dtype = dtypes.pop() - if is_categorical_dtype(dtype): + if isinstance(dtype, CategoricalDtype): result[name] = union_categoricals(arrs, sort_categories=False) else: result[name] = concat_compat(arrs) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index bdf469b1f1d38..90bd6a03b9a34 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -55,11 +55,8 @@ from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, - is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, - is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, @@ -67,6 +64,11 @@ is_timedelta64_dtype, needs_i8_conversion, ) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, +) from pandas.core.dtypes.missing import array_equivalent from pandas import ( @@ -2070,7 +2072,9 @@ def convert( kwargs["freq"] = _ensure_decoded(self.freq) factory: type[Index] | type[DatetimeIndex] = Index - if is_datetime64_dtype(values.dtype) or is_datetime64tz_dtype(values.dtype): + if is_datetime64_dtype(values.dtype) or isinstance( + values.dtype, DatetimeTZDtype + ): factory = DatetimeIndex elif values.dtype == "i8" and "freq" in kwargs: # PeriodIndex data is stored as i8 @@ -2371,7 +2375,7 @@ def _get_atom(cls, values: ArrayLike) -> Col: if isinstance(values, Categorical): codes = values.codes atom = cls.get_atom_data(shape, kind=codes.dtype.name) - elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): + elif is_datetime64_dtype(dtype) or isinstance(dtype, DatetimeTZDtype): atom = cls.get_atom_datetime64(shape) elif is_timedelta64_dtype(dtype): atom = cls.get_atom_timedelta64(shape) @@ -2924,7 +2928,7 @@ def write_multi_index(self, key: str, index: MultiIndex) -> None: zip(index.levels, index.codes, 
index.names) ): # write the level - if is_extension_array_dtype(lev): + if isinstance(lev.dtype, ExtensionDtype): raise NotImplementedError( "Saving a MultiIndex with an extension dtype is not supported." ) @@ -3028,7 +3032,7 @@ def write_array( empty_array = value.size == 0 transposed = False - if is_categorical_dtype(value.dtype): + if isinstance(value.dtype, CategoricalDtype): raise NotImplementedError( "Cannot store a category dtype in a HDF5 dataset that uses format=" '"fixed". Use format="table".' @@ -3077,7 +3081,7 @@ def write_array( elif is_datetime64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "datetime64" - elif is_datetime64tz_dtype(value.dtype): + elif isinstance(value.dtype, DatetimeTZDtype): # store as UTC # with a zone @@ -3950,7 +3954,7 @@ def _create_axes( tz = _get_tz(data_converted.tz) meta = metadata = ordered = None - if is_categorical_dtype(data_converted.dtype): + if isinstance(data_converted.dtype, CategoricalDtype): ordered = data_converted.ordered meta = "category" metadata = np.array(data_converted.categories, copy=False).ravel() diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 894ab110ef012..ca2676488dd11 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -42,7 +42,6 @@ from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( - is_datetime64tz_dtype, is_dict_like, is_list_like, ) @@ -110,7 +109,7 @@ def _handle_date_column( format = "s" if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]: return to_datetime(col, errors="coerce", unit=format, utc=utc) - elif is_datetime64tz_dtype(col.dtype): + elif isinstance(col.dtype, DatetimeTZDtype): # coerce to UTC timezone # GH11216 return to_datetime(col, utc=True) @@ -129,7 +128,7 @@ def _parse_date_columns(data_frame, parse_dates): # we could in theory do a 'nice' conversion from a FixedOffset tz # GH11216 for col_name, df_col in data_frame.items(): - if 
is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates: + if isinstance(df_col.dtype, DatetimeTZDtype) or col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 5b6326685d63e..c0a1cd0dc4d01 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -49,10 +49,10 @@ from pandas.core.dtypes.common import ( ensure_object, - is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import ( Categorical, @@ -2407,7 +2407,7 @@ def _prepare_categoricals(self, data: DataFrame) -> DataFrame: Check for categorical columns, retain categorical information for Stata file and convert categorical data to int """ - is_cat = [is_categorical_dtype(data[col].dtype) for col in data] + is_cat = [isinstance(data[col].dtype, CategoricalDtype) for col in data] if not any(is_cat): return data diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 23e8de69a21ed..4ac7f4c8d0a77 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -33,9 +33,9 @@ from pandas.core.dtypes.common import ( is_datetime64_dtype, is_numeric_dtype, - is_period_dtype, is_timedelta64_dtype, ) +from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, @@ -163,7 +163,7 @@ def infer_freq( if not hasattr(index, "dtype"): pass - elif is_period_dtype(index.dtype): + elif isinstance(index.dtype, PeriodDtype): raise TypeError( "PeriodIndex given. Check the `freq` attribute " "instead of using infer_freq."
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52279
2023-03-29T15:44:24Z
2023-03-29T19:48:20Z
2023-03-29T19:48:20Z
2023-03-29T19:53:06Z
Backport PR #52057 on branch 2.0.x (PERF: Fix performance regression in read_csv when converting datetimes)
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 161bc250f9f7e..5d3a0103cefb5 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -66,7 +66,10 @@ ) from pandas.core.dtypes.missing import isna -from pandas import StringDtype +from pandas import ( + DatetimeIndex, + StringDtype, +) from pandas.core import algorithms from pandas.core.arrays import ( ArrowExtensionArray, @@ -1115,14 +1118,19 @@ def converter(*date_cols, col: Hashable): date_format.get(col) if isinstance(date_format, dict) else date_format ) - return tools.to_datetime( + result = tools.to_datetime( ensure_object(strs), format=date_fmt, utc=False, dayfirst=dayfirst, errors="ignore", cache=cache_dates, - )._values + ) + if isinstance(result, DatetimeIndex): + arr = result.to_numpy() + arr.flags.writeable = True + return arr + return result._values else: try: result = tools.to_datetime(
Backport PR #52057: PERF: Fix performance regression in read_csv when converting datetimes
https://api.github.com/repos/pandas-dev/pandas/pulls/52278
2023-03-29T15:29:36Z
2023-03-29T19:14:49Z
2023-03-29T19:14:49Z
2023-03-29T19:14:49Z
Set na_rep='=na()' as default value, issue #52258
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 72fd7fadd0987..d423e43872282 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2114,7 +2114,7 @@ def to_excel( self, excel_writer: FilePath | WriteExcelBuffer | ExcelWriter, sheet_name: str = "Sheet1", - na_rep: str = "", + na_rep: str = "=na()", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True,
- [x] closes issue #52258 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52277
2023-03-29T15:06:37Z
2023-03-29T15:50:11Z
null
2023-03-29T16:11:47Z
API / CoW: Respect CoW for DataFrame(Index)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index db69f43e92518..9d5fcf12ac3e3 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -191,7 +191,8 @@ Copy-on-Write improvements of those Series objects for the columns of the DataFrame (:issue:`50777`) - The :class:`DataFrame` constructor, when constructing a DataFrame from a - :class:`Series` and specifying ``copy=False``, will now respect Copy-on-Write. + :class:`Series` or :class:`Index` and specifying ``copy=False``, will + now respect Copy-on-Write. - The :class:`DataFrame` constructor, when constructing from a NumPy array, will now copy the array by default to avoid mutating the :class:`DataFrame` diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 48731c5ff85f4..5fa7901cf85e9 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -292,7 +292,7 @@ def ndarray_to_mgr( if values.ndim == 1: values = values.reshape(-1, 1) - elif isinstance(values, ABCSeries): + elif isinstance(values, (ABCSeries, Index)): if not copy_on_sanitize and ( dtype is None or astype_is_view(values.dtype, dtype) ): @@ -305,7 +305,7 @@ def ndarray_to_mgr( values = _ensure_2d(values) - elif isinstance(values, (np.ndarray, ExtensionArray, Index)): + elif isinstance(values, (np.ndarray, ExtensionArray)): # drop subclass info _copy = ( copy_on_sanitize diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py index 2e00352dda7ae..6244452541e0b 100644 --- a/pandas/tests/copy_view/test_constructors.py +++ b/pandas/tests/copy_view/test_constructors.py @@ -228,26 +228,28 @@ def test_dataframe_from_dict_of_series_with_reindex(dtype): assert np.shares_memory(arr_before, arr_after) +@pytest.mark.parametrize("cons", [Series, Index]) @pytest.mark.parametrize( "data, dtype", [([1, 2], None), ([1, 2], "int64"), (["a", "b"], None)] ) -def 
test_dataframe_from_series(using_copy_on_write, data, dtype): - ser = Series(data, dtype=dtype) - ser_orig = ser.copy() - df = DataFrame(ser, dtype=dtype) - assert np.shares_memory(get_array(ser), get_array(df, 0)) +def test_dataframe_from_series_or_index(using_copy_on_write, data, dtype, cons): + obj = cons(data, dtype=dtype) + obj_orig = obj.copy() + df = DataFrame(obj, dtype=dtype) + assert np.shares_memory(get_array(obj), get_array(df, 0)) if using_copy_on_write: assert not df._mgr._has_no_reference(0) df.iloc[0, 0] = data[-1] if using_copy_on_write: - tm.assert_series_equal(ser, ser_orig) + tm.assert_equal(obj, obj_orig) -def test_dataframe_from_series_different_dtype(using_copy_on_write): - ser = Series([1, 2], dtype="int64") - df = DataFrame(ser, dtype="int32") - assert not np.shares_memory(get_array(ser), get_array(df, 0)) +@pytest.mark.parametrize("cons", [Series, Index]) +def test_dataframe_from_series_or_index_different_dtype(using_copy_on_write, cons): + obj = cons([1, 2], dtype="int64") + df = DataFrame(obj, dtype="int32") + assert not np.shares_memory(get_array(obj), get_array(df, 0)) if using_copy_on_write: assert df._mgr._has_no_reference(0)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52276
2023-03-29T14:42:25Z
2023-03-29T22:31:59Z
2023-03-29T22:31:59Z
2023-03-30T16:55:29Z
Backport PR #52017 on branch 2.0.x (BUG: Series constructor not respecting CoW when called with BlockManager)
diff --git a/pandas/core/series.py b/pandas/core/series.py index 1725f754ce065..f81957ebc5fd2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -379,6 +379,8 @@ def __init__( and dtype is None and copy is False ): + if using_copy_on_write(): + data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: @@ -397,6 +399,8 @@ def __init__( data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) + elif using_copy_on_write() and not copy: + data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name @@ -404,6 +408,9 @@ def __init__( NDFrame.__init__(self, data) return + if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: + data = data.copy(deep=False) + name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py index 93341286b56dd..39b1970e81f01 100644 --- a/pandas/tests/copy_view/test_constructors.py +++ b/pandas/tests/copy_view/test_constructors.py @@ -1,6 +1,7 @@ import numpy as np import pytest +import pandas as pd from pandas import ( DataFrame, DatetimeIndex, @@ -118,6 +119,33 @@ def test_series_from_index_different_dtypes(using_copy_on_write): assert ser._mgr._has_no_reference(0) +@pytest.mark.parametrize("fastpath", [False, True]) +@pytest.mark.parametrize("dtype", [None, "int64"]) +@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)]) +def test_series_from_block_manager(using_copy_on_write, idx, dtype, fastpath): + ser = Series([1, 2, 3], dtype="int64") + ser_orig = ser.copy() + ser2 = Series(ser._mgr, dtype=dtype, fastpath=fastpath, index=idx) + assert np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert not ser2._mgr._has_no_reference(0) + + ser2.iloc[0] = 100 + if 
using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + else: + expected = Series([100, 2, 3]) + tm.assert_series_equal(ser, expected) + + +def test_series_from_block_manager_different_dtype(using_copy_on_write): + ser = Series([1, 2, 3], dtype="int64") + ser2 = Series(ser._mgr, dtype="int32") + assert not np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert ser2._mgr._has_no_reference(0) + + @pytest.mark.parametrize("func", [lambda x: x, lambda x: x._mgr]) @pytest.mark.parametrize("columns", [None, ["a"]]) def test_dataframe_constructor_mgr_or_df(using_copy_on_write, columns, func):
Backport PR #52017: BUG: Series constructor not respecting CoW when called with BlockManager
https://api.github.com/repos/pandas-dev/pandas/pulls/52275
2023-03-29T14:07:21Z
2023-03-29T16:27:16Z
2023-03-29T16:27:16Z
2023-03-29T16:27:16Z
Backport PR #52031 on branch 2.0.x (BUG-CoW: DataFrame constructed from Series not respecting CoW)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 63961afaf02c4..3c3c490d2c468 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -190,6 +190,9 @@ Copy-on-Write improvements of Series objects and specifying ``copy=False``, will now use a lazy copy of those Series objects for the columns of the DataFrame (:issue:`50777`) +- The :class:`DataFrame` constructor, when constructing a DataFrame from a + :class:`Series` and specifying ``copy=False``, will now respect Copy-on-Write. + - The :class:`DataFrame` constructor, when constructing from a NumPy array, will now copy the array by default to avoid mutating the :class:`DataFrame` when mutating the array. Specify ``copy=False`` to get the old behavior. diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index ac36cb4aa5960..7aad764aaa8eb 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -259,6 +259,7 @@ def ndarray_to_mgr( copy_on_sanitize = False if typ == "array" else copy vdtype = getattr(values, "dtype", None) + refs = None if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype): # GH#19157 @@ -290,7 +291,20 @@ def ndarray_to_mgr( if values.ndim == 1: values = values.reshape(-1, 1) - elif isinstance(values, (np.ndarray, ExtensionArray, ABCSeries, Index)): + elif isinstance(values, ABCSeries): + if not copy_on_sanitize and ( + dtype is None or astype_is_view(values.dtype, dtype) + ): + refs = values._references + + if copy_on_sanitize: + values = values._values.copy() + else: + values = values._values + + values = _ensure_2d(values) + + elif isinstance(values, (np.ndarray, ExtensionArray, Index)): # drop subclass info _copy = ( copy_on_sanitize @@ -360,11 +374,11 @@ def ndarray_to_mgr( ] else: bp = BlockPlacement(slice(len(columns))) - nb = new_block_2d(values, placement=bp) + nb = new_block_2d(values, placement=bp, refs=refs) block_values = [nb] else: bp = 
BlockPlacement(slice(len(columns))) - nb = new_block_2d(values, placement=bp) + nb = new_block_2d(values, placement=bp, refs=refs) block_values = [nb] if len(columns) == 0: diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py index 93341286b56dd..2eff5f633e63d 100644 --- a/pandas/tests/copy_view/test_constructors.py +++ b/pandas/tests/copy_view/test_constructors.py @@ -200,6 +200,38 @@ def test_dataframe_from_dict_of_series_with_reindex(dtype): assert np.shares_memory(arr_before, arr_after) +@pytest.mark.parametrize( + "data, dtype", [([1, 2], None), ([1, 2], "int64"), (["a", "b"], None)] +) +def test_dataframe_from_series(using_copy_on_write, data, dtype): + ser = Series(data, dtype=dtype) + ser_orig = ser.copy() + df = DataFrame(ser, dtype=dtype) + assert np.shares_memory(get_array(ser), get_array(df, 0)) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + + df.iloc[0, 0] = data[-1] + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + + +def test_dataframe_from_series_different_dtype(using_copy_on_write): + ser = Series([1, 2], dtype="int64") + df = DataFrame(ser, dtype="int32") + assert not np.shares_memory(get_array(ser), get_array(df, 0)) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +def test_dataframe_from_series_infer_datetime(using_copy_on_write): + ser = Series([Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype=object) + df = DataFrame(ser) + assert not np.shares_memory(get_array(ser), get_array(df, 0)) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + @pytest.mark.parametrize("index", [None, [0, 1, 2]]) def test_dataframe_from_dict_of_series_with_dtype(index): # Variant of above, but now passing a dtype that causes a copy
Backport PR #52031: BUG-CoW: DataFrame constructed from Series not respecting CoW
https://api.github.com/repos/pandas-dev/pandas/pulls/52274
2023-03-29T14:06:22Z
2023-03-29T16:29:38Z
2023-03-29T16:29:38Z
2023-03-29T16:29:38Z
Backport PR #52060 on branch 2.0.x (API CoW: Return read_only NumPy array from ravel)
diff --git a/pandas/core/series.py b/pandas/core/series.py index 1725f754ce065..895b72decd379 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -765,7 +765,10 @@ def ravel(self, order: str = "C") -> ArrayLike: -------- numpy.ndarray.ravel : Return a flattened array. """ - return self._values.ravel(order=order) + arr = self._values.ravel(order=order) + if isinstance(arr, np.ndarray) and using_copy_on_write(): + arr.flags.writeable = False + return arr def __len__(self) -> int: """ diff --git a/pandas/tests/copy_view/test_array.py b/pandas/tests/copy_view/test_array.py index 501ef27bc291e..519479881948b 100644 --- a/pandas/tests/copy_view/test_array.py +++ b/pandas/tests/copy_view/test_array.py @@ -110,3 +110,12 @@ def test_series_to_numpy(using_copy_on_write): arr = ser.to_numpy(dtype="float64") assert not np.shares_memory(arr, get_array(ser, "name")) assert arr.flags.writeable is True + + +@pytest.mark.parametrize("order", ["F", "C"]) +def test_ravel_read_only(using_copy_on_write, order): + ser = Series([1, 2, 3]) + arr = ser.ravel(order=order) + if using_copy_on_write: + assert arr.flags.writeable is False + assert np.shares_memory(get_array(ser), arr)
Backport PR #52060: API CoW: Return read_only NumPy array from ravel
https://api.github.com/repos/pandas-dev/pandas/pulls/52273
2023-03-29T14:05:31Z
2023-03-29T16:29:01Z
2023-03-29T16:29:01Z
2023-03-29T16:29:01Z
DEPR: Passing a dictionary to SeriesGroupBy.agg
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 3a749708fb526..bf9c666ea521d 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -122,6 +122,7 @@ Deprecations - Deprecated 'method', 'limit', and 'fill_axis' keywords in :meth:`DataFrame.align` and :meth:`Series.align`, explicitly call ``fillna`` on the alignment results instead (:issue:`51856`) - Deprecated 'broadcast_axis' keyword in :meth:`Series.align` and :meth:`DataFrame.align`, upcast before calling ``align`` with ``left = DataFrame({col: left for col in right.columns}, index=right.index)`` (:issue:`51856`) - Deprecated the 'axis' keyword in :meth:`.GroupBy.idxmax`, :meth:`.GroupBy.idxmin`, :meth:`.GroupBy.fillna`, :meth:`.GroupBy.take`, :meth:`.GroupBy.skew`, :meth:`.GroupBy.rank`, :meth:`.GroupBy.cumprod`, :meth:`.GroupBy.cumsum`, :meth:`.GroupBy.cummax`, :meth:`.GroupBy.cummin`, :meth:`.GroupBy.pct_change`, :meth:`GroupBy.diff`, :meth:`.GroupBy.shift`, and :meth:`DataFrameGroupBy.corrwith`; for ``axis=1`` operate on the underlying :class:`DataFrame` instead (:issue:`50405`, :issue:`51046`) +- Deprecated passing a dictionary to :meth:`.SeriesGroupBy.agg`; pass a list of aggregations instead (:issue:`50684`) - Deprecated logical operations (``|``, ``&``, ``^``) between pandas objects and dtype-less sequences (e.g. 
``list``, ``tuple``), wrap a sequence in a :class:`Series` or numpy array before operating instead (:issue:`51521`) - Deprecated :meth:`DataFrame.swapaxes` and :meth:`Series.swapaxes`, use :meth:`DataFrame.transpose` or :meth:`Series.transpose` instead (:issue:`51946`) - Deprecated parameter ``convert_type`` in :meth:`Series.apply` (:issue:`52140`) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 79c1d8004c20b..72cc8128112eb 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -70,7 +70,8 @@ from pandas.core.groupby.groupby import ( GroupBy, GroupByPlot, - _agg_template, + _agg_template_frame, + _agg_template_series, _apply_docs, _transform_template, ) @@ -216,7 +217,7 @@ def _get_data_to_aggregate( def apply(self, func, *args, **kwargs) -> Series: return super().apply(func, *args, **kwargs) - @doc(_agg_template, examples=_agg_examples_doc, klass="Series") + @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series") def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): return self._aggregate_with_numba( @@ -308,6 +309,16 @@ def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame: raise SpecificationError("nested renamer is not supported") else: # GH#50684 - This accidentally worked in 1.x + msg = ( + "Passing a dictionary to SeriesGroupBy.agg is deprecated " + "and will raise in a future version of pandas. Pass a list " + "of aggregations instead." 
+ ) + warnings.warn( + message=msg, + category=FutureWarning, + stacklevel=find_stack_level(), + ) arg = list(arg.items()) elif any(isinstance(x, (tuple, list)) for x in arg): arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] @@ -1293,7 +1304,7 @@ class DataFrameGroupBy(GroupBy[DataFrame]): """ ) - @doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame") + @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame") def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): return self._aggregate_with_numba( diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index e591298e2a58e..7b367157113b5 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -489,7 +489,7 @@ class providing the base-class of operations. -------- %(example)s""" -_agg_template = """ +_agg_template_series = """ Aggregate using one or more operations over the specified axis. Parameters @@ -503,23 +503,110 @@ class providing the base-class of operations. - function - string function name - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` - - dict of axis labels -> functions, function names or list of such. - None, in which case ``**kwargs`` are used with Named Aggregation. Here the output has one column for each element in ``**kwargs``. The name of the column is keyword, whereas the value determines the aggregation used to compute the values in the column. - Can also accept a Numba JIT function with - ``engine='numba'`` specified. Only passing a single function is supported - with this engine. + .. versionchanged:: 1.1.0 - If the ``'numba'`` engine is chosen, the function must be - a user defined function with ``values`` and ``index`` as the - first and second arguments respectively in the function signature. - Each group's index will be passed to the user defined function - and optionally available for use. 
+ Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + + .. deprecated:: 2.1.0 + + Passing a dictionary is deprecated and will raise in a future version + of pandas. Pass a list of aggregations instead. +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + + .. versionadded:: 1.1.0 +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + + .. versionadded:: 1.1.0 +**kwargs + * If ``func`` is None, ``**kwargs`` are used to define the output names and + aggregations via Named Aggregation. See ``func`` entry. + * Otherwise, keyword arguments to be passed into func. + +Returns +------- +{klass} + +See Also +-------- +{klass}.groupby.apply : Apply function func group-wise + and combine the results together. +{klass}.groupby.transform : Transforms the Series on each group + based on the given function. +{klass}.aggregate : Aggregate using one or more + operations over the specified axis. + +Notes +----- +When using ``engine='numba'``, there will be no "fall back" behavior internally. 
+The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. +{examples}""" + +_agg_template_frame = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list, dict or None + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - dict of axis labels -> functions, function names or list of such. + - None, in which case ``**kwargs`` are used with Named Aggregation. Here the + output has one column for each element in ``**kwargs``. The name of the + column is keyword, whereas the value determines the aggregation used to compute + the values in the column. .. versionchanged:: 1.1.0 + + Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + *args Positional arguments to pass to func. 
engine : str, default None diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index c4c7bee2970d0..74e9abd4bd883 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -727,7 +727,9 @@ def test_groupby_as_index_agg(df): expected3 = grouped["C"].sum() expected3 = DataFrame(expected3).rename(columns={"C": "Q"}) - result3 = grouped["C"].agg({"Q": np.sum}) + msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result3 = grouped["C"].agg({"Q": np.sum}) tm.assert_frame_equal(result3, expected3) # GH7115 & GH8112 & GH8582 diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 8e84a48eb7374..a582f00aa8e79 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -486,7 +486,9 @@ def test_multifunc_select_col_integer_cols(self, df): df.columns = np.arange(len(df.columns)) # it works! - df.groupby(1, as_index=False)[2].agg({"Q": np.mean}) + msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby(1, as_index=False)[2].agg({"Q": np.mean}) def test_multiindex_columns_empty_level(self): lst = [["count", "values"], ["to filter", ""]] diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 8203b62106b3e..394d1f8d4a99b 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -34,7 +34,8 @@ "_new_Index", "_new_PeriodIndex", "_doc_template", - "_agg_template", + "_agg_template_series", + "_agg_template_frame", "_pipe_template", "__main__", "_transform_template",
- [x] closes #50684 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52268
2023-03-29T03:11:30Z
2023-03-31T17:13:26Z
2023-03-31T17:13:26Z
2023-03-31T18:13:32Z
DOC: Move notes to appropriate section in agg and aggregate docs
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bcba7c8c13f8c..c7c6dd32ed242 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9562,17 +9562,8 @@ def _gotitem( # TODO: _shallow_copy(subset)? return subset[key] - _agg_summary_and_see_also_doc = dedent( + _agg_see_also_doc = dedent( """ - The aggregation operations are always performed over an axis, either the - index (default) or the column axis. This behavior is different from - `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, - `var`), where the default is to compute the aggregation of the flattened - array, e.g., ``numpy.mean(arr_2d)`` as opposed to - ``numpy.mean(arr_2d, axis=0)``. - - `agg` is an alias for `aggregate`. Use the alias. - See Also -------- DataFrame.apply : Perform any type of operations. @@ -9635,7 +9626,7 @@ def _gotitem( _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], - see_also=_agg_summary_and_see_also_doc, + see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index f421ba448c97a..6d35bfef0d780 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -39,6 +39,13 @@ {see_also} Notes ----- +The aggregation operations are always performed over an axis, either the +index (default) or the column axis. This behavior is different from +`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, +`var`), where the default is to compute the aggregation of the flattened +array, e.g., ``numpy.mean(arr_2d)`` as opposed to +``numpy.mean(arr_2d, axis=0)``. + `agg` is an alias for `aggregate`. Use the alias. Functions that mutate the passed object can produce unexpected
- [x] closes #52253 Move a paragraph of notes from `Returns` to `Notes` in the docs of `pandas.DataFrame.agg` and `pandas.DataFrame.aggregate`, which removes the incorrect bolding.
https://api.github.com/repos/pandas-dev/pandas/pulls/52267
2023-03-29T02:22:55Z
2023-03-29T14:34:50Z
2023-03-29T14:34:50Z
2023-03-29T14:34:57Z
Backport PR #52260 on branch 2.0.x (CI/DEPS: Correct xfail condition for new pymysql)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 2504794384038..d2fb4a8426cf8 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -59,6 +59,7 @@ ArrowStringArray, StringArray, ) +from pandas.util.version import Version from pandas.io import sql from pandas.io.sql import ( @@ -2393,9 +2394,12 @@ def test_to_sql_with_negative_npinf(self, input, request): # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error # for pymysql version >= 0.10 # TODO(GH#36465): remove this version check after GH 36465 is fixed - import pymysql + pymysql = pytest.importorskip("pymysql") - if pymysql.VERSION[0:3] >= (0, 10, 0) and "infe0" in df.columns: + if ( + Version(pymysql.__version__) < Version("1.0.3") + and "infe0" in df.columns + ): mark = pytest.mark.xfail(reason="GH 36465") request.node.add_marker(mark)
Backport PR #52260: CI/DEPS: Correct xfail condition for new pymysql
https://api.github.com/repos/pandas-dev/pandas/pulls/52265
2023-03-28T21:59:38Z
2023-03-29T00:04:24Z
2023-03-29T00:04:24Z
2023-03-29T00:04:25Z
DEPR: logical operations with dtype-less sequences
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index bac567b537edc..475d48be01a9e 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -115,6 +115,7 @@ Deprecations - Deprecated 'method', 'limit', and 'fill_axis' keywords in :meth:`DataFrame.align` and :meth:`Series.align`, explicitly call ``fillna`` on the alignment results instead (:issue:`51856`) - Deprecated 'broadcast_axis' keyword in :meth:`Series.align` and :meth:`DataFrame.align`, upcast before calling ``align`` with ``left = DataFrame({col: left for col in right.columns}, index=right.index)`` (:issue:`51856`) - Deprecated the 'axis' keyword in :meth:`.GroupBy.idxmax`, :meth:`.GroupBy.idxmin`, :meth:`.GroupBy.fillna`, :meth:`.GroupBy.take`, :meth:`.GroupBy.skew`, :meth:`.GroupBy.rank`, :meth:`.GroupBy.cumprod`, :meth:`.GroupBy.cumsum`, :meth:`.GroupBy.cummax`, :meth:`.GroupBy.cummin`, :meth:`.GroupBy.pct_change`, :meth:`GroupBy.diff`, :meth:`.GroupBy.shift`, and :meth:`DataFrameGroupBy.corrwith`; for ``axis=1`` operate on the underlying :class:`DataFrame` instead (:issue:`50405`, :issue:`51046`) +- Deprecated logical operations (``|``, ``&``, ``^``) between pandas objects and dtype-less sequences (e.g. 
``list``, ``tuple``), wrap a sequence in a :class:`Series` or numpy array before operating instead (:issue:`51521`) - Deprecated :meth:`DataFrame.swapaxes` and :meth:`Series.swapaxes`, use :meth:`DataFrame.transpose` or :meth:`Series.transpose` instead (:issue:`51946`) - diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index bae2ab15f3696..13eb526cff209 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -11,6 +11,7 @@ TYPE_CHECKING, Any, ) +import warnings import numpy as np @@ -22,6 +23,7 @@ ops as libops, ) from pandas._libs.tslibs import BaseOffset +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, @@ -416,6 +418,14 @@ def fill_bool(x, left=None): right = lib.item_from_zerodim(right) if is_list_like(right) and not hasattr(right, "dtype"): # e.g. list, tuple + warnings.warn( + "Logical ops (and, or, xor) between Pandas objects and dtype-less " + "sequences (e.g. list, tuple) are deprecated and will raise in a " + "future version. 
Wrap the object in a Series, Index, or np.array " + "before operating instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) right = construct_1d_object_array_from_listlike(right) # NB: We assume extract_array has already been called on left and right diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index fa72bf4368b69..98fecca0c43ca 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -801,6 +801,14 @@ def test_series_ops_name_retention(self, flex, box, names, all_binary_operators) name = op.__name__.strip("_") is_logical = name in ["and", "rand", "xor", "rxor", "or", "ror"] + msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + warn = None + if box in [list, tuple] and is_logical: + warn = FutureWarning + right = box(right) if flex: if is_logical: @@ -809,7 +817,8 @@ def test_series_ops_name_retention(self, flex, box, names, all_binary_operators) result = getattr(left, name)(right) else: # GH#37374 logical ops behaving as set ops deprecated - result = op(left, right) + with tm.assert_produces_warning(warn, match=msg): + result = op(left, right) assert isinstance(result, Series) if box in [Index, Series]: diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py index 0d661f19087e6..ccd934c2f17bb 100644 --- a/pandas/tests/series/test_logical_ops.py +++ b/pandas/tests/series/test_logical_ops.py @@ -86,6 +86,11 @@ def test_logical_operators_int_dtype_with_float(self): # GH#9016: support bitwise op for integer types s_0123 = Series(range(4), dtype="int64") + warn_msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + msg = "Cannot perform.+with a dtyped.+array and scalar of type" with pytest.raises(TypeError, match=msg): s_0123 & np.NaN @@ -93,7 +98,8 @@ def test_logical_operators_int_dtype_with_float(self): s_0123 & 3.14 msg = 
"unsupported operand type.+for &:" with pytest.raises(TypeError, match=msg): - s_0123 & [0.1, 4, 3.14, 2] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s_0123 & [0.1, 4, 3.14, 2] with pytest.raises(TypeError, match=msg): s_0123 & np.array([0.1, 4, 3.14, 2]) with pytest.raises(TypeError, match=msg): @@ -101,11 +107,18 @@ def test_logical_operators_int_dtype_with_float(self): def test_logical_operators_int_dtype_with_str(self): s_1111 = Series([1] * 4, dtype="int8") + + warn_msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type" with pytest.raises(TypeError, match=msg): s_1111 & "a" with pytest.raises(TypeError, match="unsupported operand.+for &"): - s_1111 & ["a", "b", "c", "d"] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s_1111 & ["a", "b", "c", "d"] def test_logical_operators_int_dtype_with_bool(self): # GH#9016: support bitwise op for integer types @@ -116,10 +129,16 @@ def test_logical_operators_int_dtype_with_bool(self): result = s_0123 & False tm.assert_series_equal(result, expected) - result = s_0123 & [False] + warn_msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = s_0123 & [False] tm.assert_series_equal(result, expected) - result = s_0123 & (False,) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = s_0123 & (False,) tm.assert_series_equal(result, expected) result = s_0123 ^ False @@ -157,8 +176,14 @@ def test_logical_ops_bool_dtype_with_ndarray(self): left = Series([True, True, True, False, True]) right = [True, False, None, True, np.nan] + msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + expected = Series([True, False, False, False, False]) - result = left & right + with 
tm.assert_produces_warning(FutureWarning, match=msg): + result = left & right tm.assert_series_equal(result, expected) result = left & np.array(right) tm.assert_series_equal(result, expected) @@ -168,7 +193,8 @@ def test_logical_ops_bool_dtype_with_ndarray(self): tm.assert_series_equal(result, expected) expected = Series([True, True, True, True, True]) - result = left | right + with tm.assert_produces_warning(FutureWarning, match=msg): + result = left | right tm.assert_series_equal(result, expected) result = left | np.array(right) tm.assert_series_equal(result, expected) @@ -178,7 +204,8 @@ def test_logical_ops_bool_dtype_with_ndarray(self): tm.assert_series_equal(result, expected) expected = Series([False, True, True, True, True]) - result = left ^ right + with tm.assert_produces_warning(FutureWarning, match=msg): + result = left ^ right tm.assert_series_equal(result, expected) result = left ^ np.array(right) tm.assert_series_equal(result, expected) @@ -231,7 +258,13 @@ def test_scalar_na_logical_ops_corners(self): expected = Series(True, index=s.index) expected[::2] = False - result = s & list(s) + + msg = ( + r"Logical ops \(and, or, xor\) between Pandas objects and " + "dtype-less sequences" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s & list(s) tm.assert_series_equal(result, expected) def test_scalar_na_logical_ops_corners_aligns(self):
- [x] closes #51521 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52264
2023-03-28T21:47:34Z
2023-03-29T20:10:43Z
2023-03-29T20:10:42Z
2023-03-29T21:07:44Z
Revert "DOC: Add .map to ExtensionArray reference"
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 2d307859eb7a1..45df480779ee7 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -524,7 +524,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then pandas.api.extensions.ExtensionArray.insert \ pandas.api.extensions.ExtensionArray.isin \ pandas.api.extensions.ExtensionArray.isna \ - pandas.api.extensions.ExtensionArray.map \ pandas.api.extensions.ExtensionArray.ravel \ pandas.api.extensions.ExtensionArray.searchsorted \ pandas.api.extensions.ExtensionArray.shift \ diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index f25f0f5bc21fa..b33efd388bd60 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -53,7 +53,6 @@ objects. api.extensions.ExtensionArray.insert api.extensions.ExtensionArray.isin api.extensions.ExtensionArray.isna - api.extensions.ExtensionArray.map api.extensions.ExtensionArray.ravel api.extensions.ExtensionArray.repeat api.extensions.ExtensionArray.searchsorted diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index d583a68ab4e0b..a5032c590300c 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1719,12 +1719,6 @@ def map(self, mapper, na_action=None): The output of the mapping function applied to the array. If the function returns a tuple with more than one element a MultiIndex will be returned. - - Examples - -------- - >>> ext_arr = pd.array([1, 2, 3]) - >>> ext_arr.map(str) - array(['1', '2', '3'], dtype=object) """ return map_array(self, mapper, na_action=na_action)
Causes the doc build to currently fail. Sorry @topper-123 could you resubmit the PR?
https://api.github.com/repos/pandas-dev/pandas/pulls/52263
2023-03-28T21:41:57Z
2023-03-28T23:55:30Z
2023-03-28T23:55:30Z
2023-03-29T15:49:18Z
STYLE: turn off PLW2901 (but keep some changes)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index c455f91656909..89d02f7c1d444 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -1124,8 +1124,10 @@ def _range_from_fields( freqstr = freq.freqstr year, quarter = _make_field_arrays(year, quarter) for y, q in zip(year, quarter): - y, m = parsing.quarter_to_myear(y, q, freqstr) - val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) + calendar_year, calendar_month = parsing.quarter_to_myear(y, q, freqstr) + val = libperiod.period_ordinal( + calendar_year, calendar_month, 1, 1, 1, 1, 0, 0, base + ) ordinals.append(val) else: freq = to_offset(freq) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 98ed86e8a4dec..5175884bca210 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -568,8 +568,7 @@ def __init__( if isinstance(w, PyTablesExpr): local_dict = w.env.scope else: - w = _validate_where(w) - where[idx] = w + where[idx] = _validate_where(w) _where = " & ".join([f"({w})" for w in com.flatten(where)]) else: # _validate_where ensures we otherwise have a string diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index a9ce262c356db..b59021205f02e 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -162,9 +162,7 @@ def maybe_lift(lab, size: int) -> tuple[np.ndarray, int]: lshape = list(shape) if not xnull: for i, (lab, size) in enumerate(zip(labels, shape)): - lab, size = maybe_lift(lab, size) - labels[i] = lab - lshape[i] = size + labels[i], lshape[i] = maybe_lift(lab, size) labels = list(labels) diff --git a/pyproject.toml b/pyproject.toml index 7bd84911c9a9a..d5298ed186043 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -266,6 +266,8 @@ ignore = [ "PLR0912", # Too many statements "PLR0915", + # Redefined loop name + "PLW2901", # Global statements are discouraged "PLW0603", # Docstrings should not be included in stubs @@ -302,10 +304,8 @@ 
exclude = [ # relative imports allowed for asv_bench "asv_bench/*" = ["TID"] # to be enabled gradually -"pandas/core/*" = ["PLR5501", "PLW2901"] -"pandas/io/*" = ["PLW2901"] -"pandas/tests/*" = ["B028", "PLW2901"] -"pandas/plotting/*" = ["PLW2901"] +"pandas/core/*" = ["PLR5501"] +"pandas/tests/*" = ["B028"] "scripts/*" = ["B028"] # Keep this one enabled "pandas/_typing.py" = ["TCH"] @@ -356,6 +356,7 @@ disable = [ "use-implicit-booleaness-not-len", "wrong-import-order", "wrong-import-position", + "redefined-loop-name", # misc "abstract-class-instantiated", diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index c1b8759319a18..4c133483f571f 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -130,15 +130,14 @@ def get_api_items(api_doc_fd): if line_stripped == "": position = None continue - item = line_stripped.strip() - if item in IGNORE_VALIDATION: + if line_stripped in IGNORE_VALIDATION: continue func = importlib.import_module(current_module) - for part in item.split("."): + for part in line_stripped.split("."): func = getattr(func, part) yield ( - ".".join([current_module, item]), + ".".join([current_module, line_stripped]), func, current_section, current_subsection,
Continuation of work started on closed issue #51708. Didn't find a new issue to link this PR, may I ask you @MarcoGorelli if it is fine to link it to the closed one or shall I wait for a new issue to be opened?
https://api.github.com/repos/pandas-dev/pandas/pulls/52262
2023-03-28T20:12:33Z
2023-04-12T17:02:50Z
2023-04-12T17:02:49Z
2023-04-12T17:02:50Z
BUG: DataFrame reductions losing EA dtypes
diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py index 7900bcc6a8e6e..c572b422893d5 100644 --- a/pandas/core/array_algos/masked_reductions.py +++ b/pandas/core/array_algos/masked_reductions.py @@ -155,7 +155,7 @@ def mean( skipna: bool = True, axis: AxisInt | None = None, ): - if not values.size or mask.all(): + if (not values.size or mask.all()) and (values.ndim == 1 or axis is None): return libmissing.NA return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis) @@ -168,7 +168,7 @@ def var( axis: AxisInt | None = None, ddof: int = 1, ): - if not values.size or mask.all(): + if (not values.size or mask.all()) and (values.ndim == 1 or axis is None): return libmissing.NA return _reductions( @@ -184,7 +184,7 @@ def std( axis: AxisInt | None = None, ddof: int = 1, ): - if not values.size or mask.all(): + if (not values.size or mask.all()) and (values.ndim == 1 or axis is None): return libmissing.NA return _reductions( diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 998aa6b9e6f08..35f4f7283bb98 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1222,6 +1222,7 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs): ------ TypeError : subclass does not define reductions """ + keepdims = kwargs.pop("keepdims", False) pa_type = self._pa_array.type data_to_reduce = self._pa_array @@ -1289,6 +1290,12 @@ def pyarrow_meth(data, skip_nulls, **kwargs): f"upgrading pyarrow." 
) raise TypeError(msg) from err + + if keepdims: + # TODO: is there a way to do this without .as_py() + result = pa.array([result.as_py()], type=result.type) + return type(self)(result) + if pc.is_null(result).as_py(): return self.dtype.na_value diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f8befdbc6ca9c..f090f8cbcef4a 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2049,6 +2049,11 @@ def min(self, *, skipna: bool = True, **kwargs): ------- min : the minimum of this `Categorical`, NA value if empty """ + keepdims = kwargs.pop("keepdims", False) + if keepdims: + result = self.min(skipna=skipna, **kwargs) + return type(self)([result], dtype=self.dtype) + nv.validate_minmax_axis(kwargs.get("axis", 0)) nv.validate_min((), kwargs) self.check_for_ordered("min") @@ -2081,6 +2086,11 @@ def max(self, *, skipna: bool = True, **kwargs): ------- max : the maximum of this `Categorical`, NA if array is empty """ + keepdims = kwargs.pop("keepdims", False) + if keepdims: + result = self.max(skipna=skipna, **kwargs) + return type(self)([result], dtype=self.dtype) + nv.validate_minmax_axis(kwargs.get("axis", 0)) nv.validate_max((), kwargs) self.check_for_ordered("max") diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index aa3516c3ecb4f..54cc8b2e6ad79 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -1074,6 +1074,10 @@ def _quantile( # Reductions def _reduce(self, name: str, *, skipna: bool = True, **kwargs): + keepdims = kwargs.pop("keepdims", False) + if keepdims and "axis" not in kwargs: + return self.reshape(-1, 1)._reduce(name=name, skipna=skipna, **kwargs) + if name in {"any", "all", "min", "max", "sum", "prod", "mean", "var", "std"}: return getattr(self, name)(skipna=skipna, **kwargs) @@ -1081,13 +1085,16 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs): mask = self._mask # median, skew, kurt, sem + axis = 
kwargs.pop("axis", 0) op = getattr(nanops, f"nan{name}") - result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs) + result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs) if np.isnan(result): return libmissing.NA - return result + return self._wrap_reduction_result( + name=name, result=result, skipna=skipna, axis=axis + ) def _wrap_reduction_result(self, name: str, result, skipna, **kwargs): if isinstance(result, np.ndarray): @@ -1098,7 +1105,8 @@ def _wrap_reduction_result(self, name: str, result, skipna, **kwargs): else: mask = self._mask.any(axis=axis) - return self._maybe_mask_result(result, mask) + if name not in ["argmin", "argmax"]: + return self._maybe_mask_result(result, mask) return result def sum( diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bcba7c8c13f8c..f6b6131e1803e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10906,13 +10906,25 @@ def func(values: np.ndarray): # We only use this in the case that operates on self.values return op(values, axis=axis, skipna=skipna, **kwds) + is_am = isinstance(self._mgr, ArrayManager) + def blk_func(values, axis: Axis = 1): if isinstance(values, ExtensionArray): - if not is_1d_only_ea_dtype(values.dtype) and not isinstance( - self._mgr, ArrayManager - ): - return values._reduce(name, axis=1, skipna=skipna, **kwds) - return values._reduce(name, skipna=skipna, **kwds) + if not is_1d_only_ea_dtype(values.dtype): + if is_am: + # error: "ExtensionArray" has no attribute "reshape"; + # maybe "shape"? 
+ vals2d = values.reshape(1, -1) # type: ignore[attr-defined] + return vals2d._reduce(name, axis=1, skipna=skipna, **kwds) + else: + return values._reduce(name, axis=1, skipna=skipna, **kwds) + + try: + return values._reduce(name, skipna=skipna, keepdims=True, **kwds) + except (TypeError, ValueError): + # no keepdims keyword yet; ValueError gets raised by + # util validator functions + return values._reduce(name, skipna=skipna, **kwds) else: return op(values, axis=axis, skipna=skipna, **kwds) diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 407e16e1fa187..537029c0b3a34 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -976,24 +976,24 @@ def reduce(self, func: Callable) -> Self: ------- ArrayManager """ - result_arrays: list[np.ndarray] = [] + result_arrays: list[ArrayLike] = [] for i, arr in enumerate(self.arrays): res = func(arr, axis=0) - # TODO NaT doesn't preserve dtype, so we need to ensure to create - # a timedelta result array if original was timedelta - # what if datetime results in timedelta? (eg std) - dtype = arr.dtype if res is NaT else None - result_arrays.append( - sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type] - ) + if isinstance(res, (np.ndarray, ExtensionArray)): + # keepdims worked! + result_arrays.append(res) + else: + # TODO NaT doesn't preserve dtype, so we need to ensure to create + # a timedelta result array if original was timedelta + # what if datetime results in timedelta? 
(eg std) + dtype = arr.dtype if res is NaT else None + result_arrays.append(sanitize_array([res], None, dtype=dtype)) index = Index._simple_new(np.array([None], dtype=object)) # placeholder columns = self.items - # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; - # expected "List[Union[ndarray, ExtensionArray]]" - new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + new_mgr = type(self)(result_arrays, [index, columns]) return new_mgr def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f48b044ff0016..ebba624ac31e7 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -339,7 +339,11 @@ def reduce(self, func) -> list[Block]: if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs - res_values = np.array([[result]]) + if isinstance(result, (np.ndarray, ExtensionArray)): + # keepdims=True worked + res_values = result + else: + res_values = np.array([[result]]) else: res_values = result.reshape(-1, 1) diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py index 55d39cf84eb30..86d556f3301e7 100644 --- a/pandas/tests/arrays/categorical/test_analytics.py +++ b/pandas/tests/arrays/categorical/test_analytics.py @@ -124,7 +124,7 @@ def test_numpy_min_max_raises(self, method): with pytest.raises(TypeError, match=re.escape(msg)): method(cat) - @pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"]) + @pytest.mark.parametrize("kwarg", ["axis", "out"]) @pytest.mark.parametrize("method", ["min", "max"]) def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg): cat = Categorical(["a", "b", "c", "b"], ordered=True) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 28809e2ecb788..ad09ffd63c8c3 100644 --- a/pandas/tests/frame/test_reductions.py +++ 
b/pandas/tests/frame/test_reductions.py @@ -693,14 +693,7 @@ def test_std_timedelta64_skipna_false(self): def test_std_datetime64_with_nat( self, values, skipna, using_array_manager, request ): - # GH#51335 - if using_array_manager and ( - not skipna or all(value is pd.NaT for value in values) - ): - mark = pytest.mark.xfail( - reason="GH#51446: Incorrect type inference on NaT in reduction result" - ) - request.node.add_marker(mark) + # GH#51335, GH#51446 df = DataFrame({"a": to_datetime(values)}) result = df.std(skipna=skipna) if not skipna or all(value is pd.NaT for value in values): @@ -918,7 +911,7 @@ def test_mean_extensionarray_numeric_only_true(self): arr = np.random.randint(1000, size=(10, 5)) df = DataFrame(arr, dtype="Int64") result = df.mean(numeric_only=True) - expected = DataFrame(arr).mean() + expected = DataFrame(arr).mean().astype("Float64") tm.assert_series_equal(result, expected) def test_stats_mixed_type(self, float_string_frame): @@ -1726,3 +1719,19 @@ def test_fails_on_non_numeric(kernel): ) with pytest.raises(TypeError, match=msg): getattr(df, kernel)(*args) + + +@pytest.mark.parametrize( + "dtype", ["Int64", pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow"))] +) +def test_Int64_mean_preserves_dtype(dtype): + # GH#42895 + arr = np.random.randn(4, 3).astype("int64") + df = DataFrame(arr).astype(dtype) + df.iloc[:, 1] = pd.NA + assert (df.dtypes == dtype).all() + + res = df.mean() + exp_dtype = "Float64" if dtype == "Int64" else "float64[pyarrow]" + expected = Series([arr[:, 0].mean(), pd.NA, arr[:, 2].mean()], dtype=exp_dtype) + tm.assert_series_equal(res, expected)
- [x] closes #51446 (Replace xxxx with the GitHub issue number) - [x] closes #42895 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. In the absence of 2D EAs, we need reductions to sometimes pretend the EA is 2D. Enter "keepdims" adapted from numpy reductions. This is still pretty ugly. I'm open to ideas to clean it up. cc @rhshadrach any other particular cases need testing?
https://api.github.com/repos/pandas-dev/pandas/pulls/52261
2023-03-28T20:11:42Z
2023-04-21T20:19:36Z
null
2023-04-21T20:19:44Z
CI/DEPS: Correct xfail condition for new pymysql
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index ab88e4ccd8b82..cd1f0ce6fcfd8 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -59,6 +59,7 @@ ArrowStringArray, StringArray, ) +from pandas.util.version import Version from pandas.io import sql from pandas.io.sql import ( @@ -2397,9 +2398,12 @@ def test_to_sql_with_negative_npinf(self, input, request): # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error # for pymysql version >= 0.10 # TODO(GH#36465): remove this version check after GH 36465 is fixed - import pymysql + pymysql = pytest.importorskip("pymysql") - if pymysql.VERSION[0:3] >= (0, 10, 0) and "infe0" in df.columns: + if ( + Version(pymysql.__version__) < Version("1.0.3") + and "infe0" in df.columns + ): mark = pytest.mark.xfail(reason="GH 36465") request.node.add_marker(mark)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52260
2023-03-28T20:08:18Z
2023-03-28T21:59:27Z
2023-03-28T21:59:27Z
2023-03-28T21:59:31Z
Backport PR #52075 on branch 2.0.x (BUG: Arrow setitem segfaults when len > 145 000)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 81d8183a79bc1..8612f5a4718cd 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1618,6 +1618,10 @@ def _replace_with_mask( indices = pa.array(indices, type=pa.int64()) replacements = replacements.take(indices) return cls._if_else(mask, replacements, values) + if isinstance(values, pa.ChunkedArray) and pa.types.is_boolean(values.type): + # GH#52059 replace_with_mask segfaults for chunked array + # https://github.com/apache/arrow/issues/34634 + values = values.combine_chunks() try: return pc.replace_with_mask(values, mask, replacements) except pa.ArrowNotImplementedError: diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 9a6d88f2adfe0..4f38d1e6ed7dd 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -2333,3 +2333,12 @@ def test_series_from_string_array(dtype): ser = pd.Series(arr, dtype=dtype) expected = pd.Series(ArrowExtensionArray(arr), dtype=dtype) tm.assert_series_equal(ser, expected) + + +def test_setitem_boolean_replace_with_mask_segfault(): + # GH#52059 + N = 145_000 + arr = ArrowExtensionArray(pa.chunked_array([np.ones((N,), dtype=np.bool_)])) + expected = arr.copy() + arr[np.zeros((N,), dtype=np.bool_)] = False + assert arr._data == expected._data
* BUG: Arrow setitem segfaults when len > 145 000 * Add gh ref * Address review * Restrict to bool type (cherry picked from commit 10000db023208c1db0bba6a7d819bfe87dc49908) - [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52259
2023-03-28T18:43:34Z
2023-03-28T22:54:07Z
2023-03-28T22:54:06Z
2023-03-28T22:54:10Z
DEPR: Deprecate the convert_dtype param in Series.Apply
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index bac567b537edc..b8e849299d262 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -116,6 +116,7 @@ Deprecations - Deprecated 'broadcast_axis' keyword in :meth:`Series.align` and :meth:`DataFrame.align`, upcast before calling ``align`` with ``left = DataFrame({col: left for col in right.columns}, index=right.index)`` (:issue:`51856`) - Deprecated the 'axis' keyword in :meth:`.GroupBy.idxmax`, :meth:`.GroupBy.idxmin`, :meth:`.GroupBy.fillna`, :meth:`.GroupBy.take`, :meth:`.GroupBy.skew`, :meth:`.GroupBy.rank`, :meth:`.GroupBy.cumprod`, :meth:`.GroupBy.cumsum`, :meth:`.GroupBy.cummax`, :meth:`.GroupBy.cummin`, :meth:`.GroupBy.pct_change`, :meth:`GroupBy.diff`, :meth:`.GroupBy.shift`, and :meth:`DataFrameGroupBy.corrwith`; for ``axis=1`` operate on the underlying :class:`DataFrame` instead (:issue:`50405`, :issue:`51046`) - Deprecated :meth:`DataFrame.swapaxes` and :meth:`Series.swapaxes`, use :meth:`DataFrame.transpose` or :meth:`Series.transpose` instead (:issue:`51946`) +- Deprecated parameter ``convert_type`` in :meth:`Series.apply` (:issue:`52140`) - .. 
--------------------------------------------------------------------------- diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index c6aded1b25281..b91dcb0ae663a 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2797,12 +2797,9 @@ def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=Tr result[i] = val if convert: - return maybe_convert_objects(result, - try_float=False, - convert_datetime=False, - convert_timedelta=False) - - return result + return maybe_convert_objects(result) + else: + return result @cython.boundscheck(False) @@ -2845,12 +2842,9 @@ def map_infer( result[i] = val if convert: - return maybe_convert_objects(result, - try_float=False, - convert_datetime=False, - convert_timedelta=False) - - return result + return maybe_convert_objects(result) + else: + return result def to_object_array(rows: object, min_width: int = 0) -> ndarray: diff --git a/pandas/core/series.py b/pandas/core/series.py index 40fbc9b74b3f2..8ea5285425875 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4387,7 +4387,7 @@ def transform( def apply( self, func: AggFuncType, - convert_dtype: bool = True, + convert_dtype: bool | lib.NoDefault = lib.no_default, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: @@ -4405,6 +4405,10 @@ def apply( Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. + + .. deprecated:: 2.1.0 + The convert_dtype has been deprecated. Do ``ser.astype(object).apply()`` + instead if you want ``convert_dtype=False``. args : tuple Positional arguments passed to func after the series value. **kwargs @@ -4494,6 +4498,16 @@ def apply( Helsinki 2.484907 dtype: float64 """ + if convert_dtype is lib.no_default: + convert_dtype = True + else: + warnings.warn( + "the convert_dtype parameter is deprecated and will be removed in a " + "future version. 
Do ``ser.astype(object).apply()`` " + "instead if you want ``convert_dtype=False``.", + FutureWarning, + stacklevel=find_stack_level(), + ) return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 5b6326685d63e..e1c70277f1496 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1780,7 +1780,7 @@ def read( # Decode strings for col, typ in zip(data, self._typlist): if type(typ) is int: - data[col] = data[col].apply(self._decode, convert_dtype=True) + data[col] = data[col].apply(self._decode) data = self._insert_strls(data) diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py index bd0167701d08b..733a60ffee1da 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -74,14 +74,15 @@ def f(x): tm.assert_series_equal(result, expected) -def test_apply_dont_convert_dtype(): - s = Series(np.random.randn(10)) +@pytest.mark.parametrize("convert_dtype", [True, False]) +def test_apply_convert_dtype_deprecated(convert_dtype): + ser = Series(np.random.randn(10)) - def f(x): + def func(x): return x if x > 0 else np.nan - result = s.apply(f, convert_dtype=False) - assert result.dtype == object + with tm.assert_produces_warning(FutureWarning): + ser.apply(func, convert_dtype=convert_dtype) def test_apply_args():
Depreates the `convert_dtype` parameter in `Series.apply`. Also does some minor clean-ups in `lib.pyx`. Progress towards #52140.
https://api.github.com/repos/pandas-dev/pandas/pulls/52257
2023-03-28T13:55:49Z
2023-03-30T16:35:38Z
2023-03-30T16:35:38Z
2023-09-27T21:15:00Z
PERF: ArrowExtensionArray._from_sequence
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 5b64937552e41..998aa6b9e6f08 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -251,7 +251,7 @@ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = Fal except pa.ArrowInvalid: # GH50430: let pyarrow infer type, then cast scalars = pa.array(scalars, from_pandas=True) - if pa_dtype: + if pa_dtype and scalars.type != pa_dtype: scalars = scalars.cast(pa_dtype) return cls(scalars)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Avoid an unnecessary cast in `ArrowExtensionArray._from_sequence`. Noticed the time spent there when transposing and arrow-backed dataframe: ``` import pandas as pd import numpy as np import pyarrow as pa data = np.random.randn(10_000, 10) dtype = pd.ArrowDtype(pa.float64()) df = pd.DataFrame(data, dtype=dtype) %timeit df.T # 254 ms ± 14.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- main # 169 ms ± 3.69 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) <- PR ```
https://api.github.com/repos/pandas-dev/pandas/pulls/52256
2023-03-28T10:42:59Z
2023-03-28T18:13:55Z
2023-03-28T18:13:55Z
2023-04-18T11:03:44Z
Backport PR #52204 on branch 2.0.x (DOC: Update timestamp limitations)
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 4cd98c89e7180..2c93efb128613 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -507,7 +507,8 @@ used if a custom frequency string is passed. Timestamp limitations --------------------- -Since pandas represents timestamps in nanosecond resolution, the time span that +The limits of timestamp representation depend on the chosen resolution. For +nanosecond resolution, the time span that can be represented using a 64-bit integer is limited to approximately 584 years: .. ipython:: python @@ -515,6 +516,9 @@ can be represented using a 64-bit integer is limited to approximately 584 years: pd.Timestamp.min pd.Timestamp.max +When choosing second-resolution, the available range grows to ``+/- 2.9e11 years``. +Different resolutions can be converted to each other through ``as_unit``. + .. seealso:: :ref:`timeseries.oob`
Backport PR #52204: DOC: Update timestamp limitations
https://api.github.com/repos/pandas-dev/pandas/pulls/52255
2023-03-28T09:57:47Z
2023-03-28T12:00:57Z
2023-03-28T12:00:57Z
2023-03-28T12:00:58Z
Correct some typos in the repository
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst index 4792d26d021d6..61c9c37a26812 100644 --- a/doc/source/getting_started/index.rst +++ b/doc/source/getting_started/index.rst @@ -533,7 +533,7 @@ Data sets do not only contain numerical data. pandas provides a wide range of fu Coming from... -------------- -Are you familiar with other software for manipulating tablular data? Learn +Are you familiar with other software for manipulating tabular data? Learn the pandas-equivalent operations compared to software you already know: .. panels:: diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst index 3ce54cfebf65a..68024fbd05727 100644 --- a/doc/source/user_guide/advanced.rst +++ b/doc/source/user_guide/advanced.rst @@ -322,7 +322,7 @@ As usual, **both sides** of the slicers are included as this is label indexing. .. warning:: You should specify all axes in the ``.loc`` specifier, meaning the indexer for the **index** and - for the **columns**. There are some ambiguous cases where the passed indexer could be mis-interpreted + for the **columns**. There are some ambiguous cases where the passed indexer could be misinterpreted   as indexing *both* axes, rather than into say the ``MultiIndex`` for the rows. You should do this: diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index ac4a25728ba5f..ed1689f0c9f79 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -149,7 +149,7 @@ the columns except the one we specify: grouped.sum() The above GroupBy will split the DataFrame on its index (rows). To split by columns, first do -a tranpose: +a transpose: .. ipython:: diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 08618d5a6aa16..3d3a7fa6f0f33 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -1491,7 +1491,7 @@ def validate_func_kwargs( Returns ------- columns : List[str] - List of user-provied keys. 
+ List of user-provided keys. func : List[Union[str, callable[...,Any]]] List of user-provided aggfuncs diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 54d1497ad05f3..d3bdcee7a7341 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -711,13 +711,13 @@ def register_converter_cb(key) -> None: styler_max_rows = """ : int, optional The maximum number of rows that will be rendered. May still be reduced to - satsify ``max_elements``, which takes precedence. + satisfy ``max_elements``, which takes precedence. """ styler_max_columns = """ : int, optional The maximum number of columns that will be rendered. May still be reduced to - satsify ``max_elements``, which takes precedence. + satisfy ``max_elements``, which takes precedence. """ styler_precision = """ diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 23bc0e6280e27..52606cd7a914e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1697,7 +1697,7 @@ def pandas_dtype(dtype) -> DtypeObj: try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show - # numpy deprication warning of np.integer + # numpy deprecation warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 54f6d84c8dc2a..d302085275757 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -901,7 +901,7 @@ def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset: return freq_offset raise TypeError( - "PeriodDtype argument should be string or BaseOffet, " + "PeriodDtype argument should be string or BaseOffset, " f"got {type(freq).__name__}" ) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0e9826fe8b63a..72fd7fadd0987 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6559,7 +6559,7 @@ def 
infer_objects(self, copy: bool_t | None = None) -> Self: Parameters ---------- copy : bool, default True - Whether to make a copy for non-object or non-inferrable columns + Whether to make a copy for non-object or non-inferable columns or Series. Returns diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index 64076e4952cde..bfe21082cc4d0 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -89,7 +89,7 @@ def get_sheet_data( file_rows_needed: int | None = None, ) -> list[list[Scalar]]: data: list[list[Scalar]] = [] - prevous_row_number = -1 + previous_row_number = -1 # When sparse=True the rows can have different lengths and empty rows are # not returned. The cells are namedtuples of row, col, value (r, c, v). for row in sheet.rows(sparse=True): @@ -99,9 +99,9 @@ def get_sheet_data( # trim trailing empty elements converted_row.pop() if converted_row: - data.extend([[]] * (row_number - prevous_row_number - 1)) + data.extend([[]] * (row_number - previous_row_number - 1)) data.append(converted_row) - prevous_row_number = row_number + previous_row_number = row_number if file_rows_needed is not None and len(data) >= file_rows_needed: break if data: diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index fc12a8b0722e6..0bd8769b5de60 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -740,7 +740,7 @@ def _calc_max_rows_fitted(self) -> int | None: _, height = get_terminal_size() if self.max_rows == 0: # rows available to fill with actual data - return height - self._get_number_of_auxillary_rows() + return height - self._get_number_of_auxiliary_rows() if self._is_screen_short(height): max_rows = height @@ -775,7 +775,7 @@ def _is_screen_narrow(self, max_width) -> bool: def _is_screen_short(self, max_height) -> bool: return bool(self.max_rows == 0 and len(self.frame) > max_height) - def _get_number_of_auxillary_rows(self) -> int: + def _get_number_of_auxiliary_rows(self) -> int: """Get 
number of rows occupied by prompt, dots and dimension info.""" dot_row = 1 prompt_row = 1 diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index bfe65f8bf3c29..de88960280102 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -86,11 +86,11 @@ def test_comparisons(self, factor): cat_rev > cat_rev_base2 # Only categories with same ordering information can be compared - cat_unorderd = cat.set_ordered(False) + cat_unordered = cat.set_ordered(False) assert not (cat > cat).any() with pytest.raises(TypeError, match=msg): - cat > cat_unorderd + cat > cat_unordered # comparison (in both directions) with Series will raise s = Series(["b", "b", "b"]) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 9057d91b1960a..e862a6985160b 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -523,7 +523,7 @@ def test_freq_argument_required(self): with pytest.raises(TypeError, match=msg): PeriodDtype() - msg = "PeriodDtype argument should be string or BaseOffet, got NoneType" + msg = "PeriodDtype argument should be string or BaseOffset, got NoneType" with pytest.raises(TypeError, match=msg): # GH#51790 PeriodDtype(None) diff --git a/pandas/tests/frame/methods/test_isetitem.py b/pandas/tests/frame/methods/test_isetitem.py index e8064cbc44d5f..69f394afb6519 100644 --- a/pandas/tests/frame/methods/test_isetitem.py +++ b/pandas/tests/frame/methods/test_isetitem.py @@ -38,7 +38,7 @@ def test_isetitem_ea_df_scalar_indexer(self): ) tm.assert_frame_equal(df, expected) - def test_isetitem_dimension_missmatch(self): + def test_isetitem_dimension_mismatch(self): # GH#51701 df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}) value = df.copy() diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index b581dfd8c44b0..40c8e4fa27f90 100644 --- 
a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -2006,7 +2006,7 @@ def test_inplace_arithmetic_series_update(using_copy_on_write): tm.assert_frame_equal(df, expected) -def test_arithemetic_multiindex_align(): +def test_arithmetic_multiindex_align(): """ Regression test for: https://github.com/pandas-dev/pandas/issues/33765 """ diff --git a/pandas/tests/frame/test_npfuncs.py b/pandas/tests/frame/test_npfuncs.py index 0b7699e46d720..b40f953cd800e 100644 --- a/pandas/tests/frame/test_npfuncs.py +++ b/pandas/tests/frame/test_npfuncs.py @@ -11,7 +11,7 @@ class TestAsArray: - def test_asarray_homogenous(self): + def test_asarray_homogeneous(self): df = DataFrame({"A": Categorical([1, 2]), "B": Categorical([1, 2])}) result = np.asarray(df) # may change from object in the future diff --git a/pandas/tests/frame/test_unary.py b/pandas/tests/frame/test_unary.py index a9ec726ab443e..07bcb2ccc121a 100644 --- a/pandas/tests/frame/test_unary.py +++ b/pandas/tests/frame/test_unary.py @@ -84,7 +84,7 @@ def test_invert_mixed(self): ) tm.assert_frame_equal(result, expected) - def test_invert_empy_not_input(self): + def test_invert_empty_not_input(self): # GH#51032 df = pd.DataFrame() result = ~df diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index 3f0150a2186a9..d580b89f2f006 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -603,12 +603,12 @@ def test_filter_non_bool_raises(): def test_filter_dropna_with_empty_groups(): # GH 10780 data = Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3)) - groupped = data.groupby(level=0) - result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False) + grouped = data.groupby(level=0) + result_false = grouped.filter(lambda x: x.mean() > 1, dropna=False) expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3)) tm.assert_series_equal(result_false, expected_false) - result_true = groupped.filter(lambda 
x: x.mean() > 1, dropna=True) + result_true = grouped.filter(lambda x: x.mean() > 1, dropna=True) expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64) tm.assert_series_equal(result_true, expected_true) diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py index 9b36423be73dd..c06a6fcc2a037 100644 --- a/pandas/tests/io/json/test_readlines.py +++ b/pandas/tests/io/json/test_readlines.py @@ -320,7 +320,7 @@ def test_readjson_nrows_chunks(request, nrows, chunksize, engine): def test_readjson_nrows_requires_lines(engine): # GH 33916 - # Test ValuError raised if nrows is set without setting lines in read_json + # Test ValueError raised if nrows is set without setting lines in read_json jsonl = """{"a": 1, "b": 2} {"a": 3, "b": 4} {"a": 5, "b": 6} diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index 1635c79de9abb..1a0a5dfe213bd 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -983,12 +983,12 @@ def test_df_axis_param_depr(): index.name = "date" df = DataFrame(np.random.rand(10, 2), columns=list("AB"), index=index).T - # Deprication error when axis=1 is explicitly passed + # Deprecation error when axis=1 is explicitly passed warning_msg = "DataFrame.resample with axis=1 is deprecated." 
with tm.assert_produces_warning(FutureWarning, match=warning_msg): df.resample("M", axis=1) - # Deprication error when axis=0 is explicitly passed + # Deprecation error when axis=0 is explicitly passed df = df.T warning_msg = ( "The 'axis' keyword in DataFrame.resample is deprecated and " diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 44b02310eb8a7..32d789c118321 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -338,7 +338,7 @@ def test_concat_mixed_objs(self): result = concat([s1, df, s2], ignore_index=True) tm.assert_frame_equal(result, expected) - def test_dtype_coerceion(self): + def test_dtype_coercion(self): # 12411 df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]}) diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index 7e4002dc3a0cf..02244c1686cab 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -730,7 +730,7 @@ def test_constructor_fromisocalendar(self): assert isinstance(result, Timestamp) -def test_constructor_ambigous_dst(): +def test_constructor_ambiguous_dst(): # GH 24329 # Make sure that calling Timestamp constructor # on Timestamp created from ambiguous time diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index 2fee395886cff..0a43db87674af 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -29,7 +29,7 @@ class TestTimestampUnaryOps: # -------------------------------------------------------------- - def test_round_divison_by_zero_raises(self): + def test_round_division_by_zero_raises(self): ts = Timestamp("2016-01-01") msg = "Division by zero in rounding" diff --git a/pandas/tests/series/indexing/test_getitem.py 
b/pandas/tests/series/indexing/test_getitem.py index 91d6be01eef16..0f044ae576af8 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -576,7 +576,7 @@ def test_getitem_dataframe_raises(): ser[df > 5] -def test_getitem_assignment_series_aligment(): +def test_getitem_assignment_series_alignment(): # https://github.com/pandas-dev/pandas/issues/37427 # with getitem, when assigning with a Series, it is not first aligned ser = Series(range(10)) diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py index c1579dbbbc21a..ea239c753ecb5 100644 --- a/pandas/tests/series/methods/test_sort_values.py +++ b/pandas/tests/series/methods/test_sort_values.py @@ -189,7 +189,7 @@ def test_sort_values_ignore_index( tm.assert_series_equal(result_ser, expected) tm.assert_series_equal(ser, Series(original_list)) - def test_mergesort_decending_stability(self): + def test_mergesort_descending_stability(self): # GH 28697 s = Series([1, 2, 1, 3], ["first", "b", "second", "c"]) result = s.sort_values(ascending=False, kind="mergesort") diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 1ec8d990add3a..bcb1fe35eaa28 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -82,7 +82,7 @@ def test_infer_with_date_and_datetime(self): expected = Index(vals, dtype=object) tm.assert_index_equal(idx, expected) - def test_unparseable_strings_with_dt64_dtype(self): + def test_unparsable_strings_with_dt64_dtype(self): # pre-2.0 these would be silently ignored and come back with object dtype vals = ["aa"] msg = "^Unknown datetime string format, unable to parse: aa, at position 0$" diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py index f7d41ed536a40..250bee02e06f4 100644 --- a/pandas/tests/util/test_assert_index_equal.py +++ 
b/pandas/tests/util/test_assert_index_equal.py @@ -146,7 +146,7 @@ def test_index_equal_values_too_far(check_exact, rtol): @pytest.mark.parametrize("check_order", [True, False]) -def test_index_equal_value_oder_mismatch(check_exact, rtol, check_order): +def test_index_equal_value_order_mismatch(check_exact, rtol, check_order): idx1 = Index([1, 2, 3]) idx2 = Index([3, 2, 1]) diff --git a/pyproject.toml b/pyproject.toml index 2aadfd7bd41ef..ac6a4a7b2a61b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -282,7 +282,7 @@ ignore = [ "B904", # Magic number "PLR2004", - # Consider `elif` instead of `else` then `if` to remove indendation level + # Consider `elif` instead of `else` then `if` to remove indentation level "PLR5501", ]
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This PR corrects some typos that I've found in the repository.
https://api.github.com/repos/pandas-dev/pandas/pulls/52254
2023-03-28T02:21:57Z
2023-03-28T18:15:36Z
2023-03-28T18:15:36Z
2023-03-28T18:15:45Z
BUG #52197 proposed fix
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 71fda39a05e55..9f37d12d0fa56 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -208,6 +208,7 @@ I/O ^^^ - Bug in :func:`read_html`, tail texts were removed together with elements containing ``display:none`` style (:issue:`51629`) - :meth:`DataFrame.to_orc` now raising ``ValueError`` when non-default :class:`Index` is given (:issue:`51828`) +- Bug in :func:`read_html`, style elements were read into DataFrames (:issue:`52197`) - Period diff --git a/pandas/io/html.py b/pandas/io/html.py index ce95c2be8581f..02661329b58de 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -582,7 +582,6 @@ def __init__(self, *args, **kwargs) -> None: def _parse_tables(self, doc, match, attrs): element_name = self._strainer.name tables = doc.find_all(element_name, attrs=attrs) - if not tables: raise ValueError("No tables found") @@ -592,13 +591,15 @@ def _parse_tables(self, doc, match, attrs): for table in tables: if self.displayed_only: + for elem in table.find_all("style"): + elem.decompose() + for elem in table.find_all(style=re.compile(r"display:\s*none")): elem.decompose() if table not in unique_tables and table.find(string=match) is not None: result.append(table) unique_tables.add(table) - if not result: raise ValueError(f"No tables found matching pattern {repr(match.pattern)}") return result @@ -730,10 +731,11 @@ def _parse_tables(self, doc, match, kwargs): # lxml utilizes XPATH 1.0 which does not have regex # support. 
As a result, we find all elements with a style # attribute and iterate them to check for display:none + for elem in table.xpath(".//style"): + elem.drop_tree() for elem in table.xpath(".//*[@style]"): if "display:none" in elem.attrib.get("style", "").replace(" ", ""): elem.drop_tree() - if not tables: raise ValueError(f"No tables found matching regex {repr(pattern)}") return tables @@ -1170,6 +1172,7 @@ def read_html( '{None, "header", "footer", "body", "all"}, got ' f'"{extract_links}"' ) + validate_header_arg(header) check_dtype_backend(dtype_backend) diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 252f028e0dffc..047918d4694e0 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1495,3 +1495,28 @@ def test_invalid_dtype_backend(self): ) with pytest.raises(ValueError, match=msg): read_html("test", dtype_backend="numpy") + + def test_style_tag(self): + # GH 48316 + data = """ + <table> + <tr> + <th> + <style>.style</style> + A + </th> + <th>B</th> + </tr> + <tr> + <td>A1</td> + <td>B1</td> + </tr> + <tr> + <td>A2</td> + <td>B2</td> + </tr> + </table> + """ + result = self.read_html(data)[0] + expected = DataFrame(data=[["A1", "B1"], ["A2", "B2"]], columns=["A", "B"]) + tm.assert_frame_equal(result, expected)
- [x] closes #52197 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52251
2023-03-27T23:17:49Z
2023-03-28T18:23:57Z
2023-03-28T18:23:57Z
2023-03-28T18:24:07Z
REGR: Revert GH51335
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 80b52d3b3955e..02d6a3c4312cc 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -777,7 +777,7 @@ Other API changes - The levels of the index of the :class:`Series` returned from ``Series.sparse.from_coo`` now always have dtype ``int32``. Previously they had dtype ``int64`` (:issue:`50926`) - :func:`to_datetime` with ``unit`` of either "Y" or "M" will now raise if a sequence contains a non-round ``float`` value, matching the ``Timestamp`` behavior (:issue:`50301`) - The methods :meth:`Series.round`, :meth:`DataFrame.__invert__`, :meth:`Series.__invert__`, :meth:`DataFrame.swapaxes`, :meth:`DataFrame.first`, :meth:`DataFrame.last`, :meth:`Series.first`, :meth:`Series.last` and :meth:`DataFrame.align` will now always return new objects (:issue:`51032`) -- :class:`DataFrame` and :class:`DataFrameGroupBy` aggregations (e.g. "sum") with object-dtype columns no longer infer non-object dtypes for their results, explicitly call ``result.infer_objects(copy=False)`` on the result to obtain the old behavior (:issue:`51205`, :issue:`49603`) +- :class:`DataFrameGroupBy` aggregations (e.g. 
"sum") with object-dtype columns no longer infer non-object dtypes for their results, explicitly call ``result.infer_objects(copy=False)`` on the result to obtain the old behavior (:issue:`51205`, :issue:`49603`) - Division by zero with :class:`ArrowDtype` dtypes returns ``-inf``, ``nan``, or ``inf`` depending on the numerator, instead of raising (:issue:`51541`) - Added :func:`pandas.api.types.is_any_real_numeric_dtype` to check for real numeric dtypes (:issue:`51152`) - :meth:`~arrays.ArrowExtensionArray.value_counts` now returns data with :class:`ArrowDtype` with ``pyarrow.int64`` type instead of ``"Int64"`` type (:issue:`51462`) @@ -1204,11 +1204,11 @@ Numeric ^^^^^^^ - Bug in :meth:`DataFrame.add` cannot apply ufunc when inputs contain mixed DataFrame type and Series type (:issue:`39853`) - Bug in arithmetic operations on :class:`Series` not propagating mask when combining masked dtypes and numpy dtypes (:issue:`45810`, :issue:`42630`) +- Bug in DataFrame reduction methods (e.g. :meth:`DataFrame.sum`) with object dtype, ``axis=1`` and ``numeric_only=False`` would not be coerced to float (:issue:`49551`) - Bug in :meth:`DataFrame.sem` and :meth:`Series.sem` where an erroneous ``TypeError`` would always raise when using data backed by an :class:`ArrowDtype` (:issue:`49759`) - Bug in :meth:`Series.__add__` casting to object for list and masked :class:`Series` (:issue:`22962`) - Bug in :meth:`~arrays.ArrowExtensionArray.mode` where ``dropna=False`` was not respected when there was ``NA`` values (:issue:`50982`) - Bug in :meth:`DataFrame.query` with ``engine="numexpr"`` and column names are ``min`` or ``max`` would raise a ``TypeError`` (:issue:`50937`) -- Bug in :meth:`DataFrame.min` and :meth:`DataFrame.max` with tz-aware data containing ``pd.NaT`` and ``axis=1`` would return incorrect results (:issue:`51242`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bcba7c8c13f8c..96048a454819f 100644 --- a/pandas/core/frame.py +++ 
b/pandas/core/frame.py @@ -102,6 +102,7 @@ is_integer_dtype, is_iterator, is_list_like, + is_object_dtype, is_scalar, is_sequence, needs_i8_conversion, @@ -10925,44 +10926,54 @@ def _get_data() -> DataFrame: data = self._get_bool_data() return data - # Case with EAs see GH#35881 - df = self - if numeric_only: - df = _get_data() + if numeric_only or axis == 0: + # For numeric_only non-None and axis non-None, we know + # which blocks to use and no try/except is needed. + # For numeric_only=None only the case with axis==0 and no object + # dtypes are unambiguous can be handled with BlockManager.reduce + # Case with EAs see GH#35881 + df = self + if numeric_only: + df = _get_data() + if axis == 1: + df = df.T + axis = 0 + + # After possibly _get_data and transposing, we are now in the + # simple case where we can use BlockManager.reduce + res = df._mgr.reduce(blk_func) + out = df._constructor(res).iloc[0] + if out_dtype is not None: + out = out.astype(out_dtype) + if axis == 0 and len(self) == 0 and name in ["sum", "prod"]: + # Even if we are object dtype, follow numpy and return + # float64, see test_apply_funcs_over_empty + out = out.astype(np.float64) + + return out + + assert not numeric_only and axis in (1, None) + + data = self + values = data.values + result = func(values) + + if hasattr(result, "dtype"): + if filter_type == "bool" and notna(result).all(): + result = result.astype(np.bool_) + elif filter_type is None and is_object_dtype(result.dtype): + try: + result = result.astype(np.float64) + except (ValueError, TypeError): + # try to coerce to the original dtypes item by item if we can + pass + if axis is None: - return func(df.values) - elif axis == 1: - if len(df.index) == 0: - # Taking a transpose would result in no columns, losing the dtype. 
- # In the empty case, reducing along axis 0 or 1 gives the same - # result dtype, so reduce with axis=0 and ignore values - result = df._reduce( - op, - name, - axis=0, - skipna=skipna, - numeric_only=False, - filter_type=filter_type, - **kwds, - ).iloc[:0] - result.index = df.index - return result - df = df.T - - # After possibly _get_data and transposing, we are now in the - # simple case where we can use BlockManager.reduce - res = df._mgr.reduce(blk_func) - out = df._constructor(res).iloc[0] - if out_dtype is not None: - out = out.astype(out_dtype) - elif (df._mgr.get_dtypes() == object).any(): - out = out.astype(object) - elif len(self) == 0 and name in ("sum", "prod"): - # Even if we are object dtype, follow numpy and return - # float64, see test_apply_funcs_over_empty - out = out.astype(np.float64) + return result - return out + labels = self._get_agg_axis(axis) + result = self._constructor_sliced(result, index=labels) + return result def _reduce_axis1(self, name: str, func, skipna: bool) -> Series: """ diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 28809e2ecb788..b40ba4bf48eaa 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -317,8 +317,16 @@ def wrapper(x): DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object), ], ) - def test_stat_operators_attempt_obj_array(self, method, df, axis): + def test_stat_operators_attempt_obj_array( + self, method, df, axis, request, using_array_manager + ): # GH#676 + if ( + axis in (1, "columns") + or method not in ("sum", "prod", "min", "max") + or using_array_manager + ): + request.node.add_marker(pytest.mark.xfail(reason="Revert of GH#51335")) assert df.values.dtype == np.object_ result = getattr(df, method)(axis=axis) expected = getattr(df.astype("f8"), method)(axis=axis).astype(object) @@ -402,6 +410,7 @@ def test_mean_includes_datetimes(self, tz): expected = Series([Timestamp("2000", tz=tz)], 
index=["A"]) tm.assert_series_equal(result, expected) + @pytest.mark.xfail(reason="Revert of GH#51335") def test_mean_mixed_string_decimal(self): # GH 11670 # possible bug when calculating mean of DataFrame? @@ -731,7 +740,9 @@ def test_sum_corner(self): tm.makePeriodIndex(0), ], ) - def test_axis_1_empty(self, all_reductions, index, using_array_manager): + def test_axis_1_empty(self, all_reductions, index, using_array_manager, request): + if all_reductions not in ("count", "any", "all"): + request.node.add_marker(pytest.mark.xfail(reason="Revert of GH#51335")) df = DataFrame(columns=["a"], index=index) result = getattr(df, all_reductions)(axis=1) if all_reductions in ("any", "all"): @@ -1464,6 +1475,7 @@ def test_preserve_timezone(self, initial: str, method): result = getattr(df, method)(axis=1) tm.assert_series_equal(result, expected) + @pytest.mark.xfail(reason="GH#51335") @pytest.mark.parametrize("method", ["min", "max"]) def test_minmax_tzaware_skipna_axis_1(self, method, skipna): # GH#51242 @@ -1671,9 +1683,10 @@ def test_prod_sum_min_count_mixed_object(): @pytest.mark.parametrize("method", ["min", "max", "mean", "median", "skew", "kurt"]) @pytest.mark.parametrize("numeric_only", [True, False]) -def test_reduction_axis_none_returns_scalar(method, numeric_only): +def test_reduction_axis_none_returns_scalar(method, numeric_only, request): # GH#21597 As of 2.0, axis=None reduces over all axes. - + if numeric_only: + request.node.add_marker(pytest.mark.xfail(reason="Revert of GH#51335")) df = DataFrame(np.random.randn(4, 4)) result = getattr(df, method)(axis=None, numeric_only=numeric_only)
Ref: https://github.com/pandas-dev/pandas/pull/51955#issuecomment-1478685300 - [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. @jbrockmendel - this only reverts the change to `_reduce`, but leaves other changes untouched. Would you prefer to revert all of #51335?
https://api.github.com/repos/pandas-dev/pandas/pulls/52250
2023-03-27T22:36:41Z
2023-04-08T12:28:41Z
null
2023-04-16T13:32:06Z
TYP: remove mypy ignore from array_manager.py
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 407e16e1fa187..0925f3a3cee1f 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -834,7 +834,7 @@ def iset( # multiple columns -> convert slice or array to integer indices elif isinstance(loc, slice): - indices = range( + indices: range | np.ndarray = range( loc.start if loc.start is not None else 0, loc.stop if loc.stop is not None else self.shape_proper[1], loc.step if loc.step is not None else 1, @@ -842,9 +842,7 @@ def iset( else: assert isinstance(loc, np.ndarray) assert loc.dtype == "bool" - # error: Incompatible types in assignment (expression has type "ndarray", - # variable has type "range") - indices = np.nonzero(loc)[0] # type: ignore[assignment] + indices = np.nonzero(loc)[0] assert value.ndim == 2 assert value.shape[0] == len(self._axes[0])
Related to #37715 mypy ignore was removed from pandas/core/internals/array_manager.py
https://api.github.com/repos/pandas-dev/pandas/pulls/52249
2023-03-27T21:53:06Z
2023-03-29T21:42:17Z
2023-03-29T21:42:16Z
2023-03-30T01:08:20Z
CLN: Assorted
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index adb920e0cca6d..94ad2aa3a751f 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -51,10 +51,12 @@ from pandas._libs.khash cimport ( kh_resize_int64, khiter_t, ) +from pandas._libs.missing cimport ( + checknull, + isnaobj, +) from pandas._libs.util cimport get_nat -import pandas._libs.missing as missing - cdef: float64_t FP_ERR = 1e-13 float64_t NaN = <float64_t>np.NaN @@ -95,10 +97,10 @@ class Infinity: def __gt__(self, other): return (not isinstance(other, Infinity) and - not missing.checknull(other)) + not checknull(other)) def __ge__(self, other): - return not missing.checknull(other) + return not checknull(other) class NegInfinity: @@ -107,10 +109,10 @@ class NegInfinity: """ def __lt__(self, other): return (not isinstance(other, NegInfinity) and - not missing.checknull(other)) + not checknull(other)) def __le__(self, other): - return not missing.checknull(other) + return not checknull(other) def __eq__(self, other): return isinstance(other, NegInfinity) @@ -988,7 +990,7 @@ def rank_1d( if mask is not None: pass elif numeric_object_t is object: - mask = missing.isnaobj(masked_vals) + mask = isnaobj(masked_vals) elif numeric_object_t is int64_t and is_datetimelike: mask = (masked_vals == NPY_NAT).astype(np.uint8) elif numeric_object_t is float64_t or numeric_object_t is float32_t: @@ -1366,7 +1368,7 @@ def rank_2d( nan_fill_val = get_rank_nan_fill_val(nans_rank_highest, <numeric_object_t>0) if numeric_object_t is object: - mask = missing.isnaobj(values).view(np.uint8) + mask = isnaobj(values).view(np.uint8) elif numeric_object_t is float64_t or numeric_object_t is float32_t: mask = np.isnan(values).view(np.uint8) else: diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index a95e92923cd00..f438ddbf3de1f 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -49,7 +49,6 @@ from pandas._libs.missing cimport checknull cdef int64_t NPY_NAT = 
util.get_nat() -_int64_max = np.iinfo(np.int64).max cdef float64_t NaN = <float64_t>np.NaN @@ -256,9 +255,9 @@ def group_cumprod( Always false, `values` is never datetime-like. skipna : bool If true, ignore nans in `values`. - mask: np.ndarray[uint8], optional + mask : np.ndarray[uint8], optional Mask of values - result_mask: np.ndarray[int8], optional + result_mask : np.ndarray[int8], optional Mask of out array Notes @@ -345,9 +344,9 @@ def group_cumsum( True if `values` contains datetime-like entries. skipna : bool If true, ignore nans in `values`. - mask: np.ndarray[uint8], optional + mask : np.ndarray[uint8], optional Mask of values - result_mask: np.ndarray[int8], optional + result_mask : np.ndarray[int8], optional Mask of out array Notes @@ -615,7 +614,7 @@ def group_any_all( # value encountered is True flag_val = 1 else: - raise ValueError("'bool_func' must be either 'any' or 'all'!") + raise ValueError("'val_test' must be either 'any' or 'all'!") out[:] = 1 - flag_val @@ -1036,7 +1035,7 @@ def group_ohlc( raise NotImplementedError("Argument 'values' must have only one dimension") if int64float_t is float32_t or int64float_t is float64_t: - out[:] = np.nan + out[:] = NAN else: out[:] = 0 diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index c6aded1b25281..616a9bddc24ac 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2428,7 +2428,7 @@ def maybe_convert_objects(ndarray[object] objects, Seen seen = Seen() object val _TSObject tsobj - float64_t fnan = np.nan + float64_t fnan = NaN if dtype_if_all_nat is not None: # in practice we don't expect to ever pass dtype_if_all_nat diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 946ba5ddaa248..c669bf349162d 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -9,6 +9,7 @@ from numpy cimport ( import numpy as np cimport numpy as cnp +from numpy.math cimport NAN cnp.import_array() @@ -129,7 +130,7 @@ def explode(ndarray[object] values): count += 1 else: # 
empty list-like, use a nan marker - result[count] = np.nan + result[count] = NAN count += 1 else: # replace with the existing scalar diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index 74f7653ebbe0c..0918ecc977a3a 100644 --- a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -1,4 +1,5 @@ cimport cython + import numpy as np cimport numpy as cnp @@ -10,16 +11,14 @@ from numpy cimport ( ndarray, uint8_t, ) +from numpy.math cimport ( + INFINITY as INF, + NAN as NaN, +) cnp.import_array() -# ----------------------------------------------------------------------------- -# Preamble stuff - -cdef float64_t NaN = <float64_t>np.NaN -cdef float64_t INF = <float64_t>np.inf - # ----------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 3873e0c848145..b162f278fcbec 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -150,12 +150,13 @@ def get_date_name_field( name based on requested field (e.g. 
day_name) """ cdef: - Py_ssize_t i, count = dtindex.shape[0] + Py_ssize_t i + cnp.npy_intp count = dtindex.shape[0] ndarray[object] out, names npy_datetimestruct dts int dow - out = np.empty(count, dtype=object) + out = cnp.PyArray_EMPTY(1, &count, cnp.NPY_OBJECT, 0) if field == "day_name": if locale is None: diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 6105f96a3b1b8..4c4e3dfa4bf76 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -270,11 +270,12 @@ cdef object _get_utc_trans_times_from_dateutil_tz(tzinfo tz): cdef int64_t[::1] unbox_utcoffsets(object transinfo): cdef: - Py_ssize_t i, sz + Py_ssize_t i + cnp.npy_intp sz int64_t[::1] arr sz = len(transinfo) - arr = np.empty(sz, dtype="i8") + arr = cnp.PyArray_EMPTY(1, &sz, cnp.NPY_INT64, 0) for i in range(sz): arr[i] = int(transinfo[i][0].total_seconds()) * 1_000_000_000 diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index f424b74c6e577..0a19092f57706 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -1,14 +1,11 @@ cimport cython +cimport numpy as cnp from cpython.datetime cimport ( date, datetime, time, tzinfo, ) - -import numpy as np - -cimport numpy as cnp from numpy cimport ( int64_t, ndarray, @@ -101,7 +98,7 @@ def ints_to_pydatetime( tzinfo tz=None, str box="datetime", NPY_DATETIMEUNIT reso=NPY_FR_ns, -) -> np.ndarray: +) -> ndarray: # stamps is int64, arbitrary ndim """ Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp. 
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 334400cc13201..2e82cabc76994 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1078,7 +1078,9 @@ def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray: return other + self @final - def _sub_datetimelike_scalar(self, other: datetime | np.datetime64): + def _sub_datetimelike_scalar( + self, other: datetime | np.datetime64 + ) -> TimedeltaArray: if self.dtype.kind != "M": raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}") @@ -1095,7 +1097,7 @@ def _sub_datetimelike_scalar(self, other: datetime | np.datetime64): return self._sub_datetimelike(ts) @final - def _sub_datetime_arraylike(self, other: DatetimeArray): + def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray: if self.dtype.kind != "M": raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}") @@ -1296,7 +1298,7 @@ def _addsub_object_array(self, other: npt.NDArray[np.object_], op): res_values = op(self.astype("O"), np.asarray(other)) return res_values - def _accumulate(self, name: str, *, skipna: bool = True, **kwargs): + def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> Self: if name not in {"cummin", "cummax"}: raise TypeError(f"Accumulation {name} not supported for {type(self)}") @@ -2015,7 +2017,7 @@ def round( freq, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", - ): + ) -> Self: return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent) @Appender((_round_doc + _floor_example).format(op="floor")) @@ -2024,7 +2026,7 @@ def floor( freq, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", - ): + ) -> Self: return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) @Appender((_round_doc + _ceil_example).format(op="ceil")) @@ -2033,7 +2035,7 @@ def ceil( freq, ambiguous: TimeAmbiguous = "raise", 
nonexistent: TimeNonexistent = "raise", - ): + ) -> Self: return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) # -------------------------------------------------------------- @@ -2054,7 +2056,7 @@ def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool: def _maybe_clear_freq(self) -> None: self._freq = None - def _with_freq(self, freq): + def _with_freq(self, freq) -> Self: """ Helper to get a view on the same data, with a new freq. diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 4bd95da2b6b07..2508bad80dc26 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -353,7 +353,9 @@ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = Fal else: if hasattr(scalars, "type"): - # pyarrow array + # pyarrow array; we cannot rely on the "to_numpy" check in + # ensure_string_array because calling scalars.to_numpy would set + # zero_copy_only to True which caused problems see GH#52076 scalars = np.array(scalars) # convert non-na-likes to str, and nan-likes to StringDtype().na_value result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c81ebc06ba753..9a74da33db531 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -112,7 +112,6 @@ _int8_max = np.iinfo(np.int8).max _int16_max = np.iinfo(np.int16).max _int32_max = np.iinfo(np.int32).max -_int64_max = np.iinfo(np.int64).max _dtype_obj = np.dtype(object) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bcba7c8c13f8c..0ff7161ca0459 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -865,6 +865,7 @@ def __init__( NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- + def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: @@ -1029,16 +1030,10 @@ def _repr_fits_vertical_(self) -> 
bool: max_rows = get_option("display.max_rows") return len(self) <= max_rows - def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: + def _repr_fits_horizontal_(self) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. - - In case of non-interactive session, no boundaries apply. - - `ignore_width` is here so ipynb+HTML output can behave the way - users expect. display.max_columns remains in effect. - GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") @@ -1046,13 +1041,13 @@ def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: # exceed max columns if (max_columns and nb_columns > max_columns) or ( - (not ignore_width) and width and nb_columns > (width // 2) + width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims - if ignore_width or width is None or not console.in_interactive_session(): + if width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): @@ -4928,65 +4923,6 @@ def _series(self): # ---------------------------------------------------------------------- # Reindexing and alignment - def _reindex_axes( - self, axes, level, limit: int | None, tolerance, method, fill_value, copy - ): - frame = self - - columns = axes["columns"] - if columns is not None: - frame = frame._reindex_columns( - columns, method, copy, level, fill_value, limit, tolerance - ) - - index = axes["index"] - if index is not None: - frame = frame._reindex_index( - index, method, copy, level, fill_value, limit, tolerance - ) - - return frame - - def _reindex_index( - self, - new_index, - method, - copy: bool, - level: Level, - fill_value=np.nan, - limit: int | None = None, - tolerance=None, - ): - new_index, indexer = self.index.reindex( - new_index, method=method, level=level, 
limit=limit, tolerance=tolerance - ) - return self._reindex_with_indexers( - {0: [new_index, indexer]}, - copy=copy, - fill_value=fill_value, - allow_dups=False, - ) - - def _reindex_columns( - self, - new_columns, - method, - copy: bool, - level: Level, - fill_value=None, - limit: int | None = None, - tolerance=None, - ): - new_columns, indexer = self.columns.reindex( - new_columns, method=method, level=level, limit=limit, tolerance=tolerance - ) - return self._reindex_with_indexers( - {1: [new_columns, indexer]}, - copy=copy, - fill_value=fill_value, - allow_dups=False, - ) - def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: @@ -7502,7 +7438,9 @@ def _arith_method(self, other, op): _logical_method = _arith_method - def _dispatch_frame_op(self, right, func: Callable, axis: AxisInt | None = None): + def _dispatch_frame_op( + self, right, func: Callable, axis: AxisInt | None = None + ) -> DataFrame: """ Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. @@ -7667,7 +7605,7 @@ def _should_reindex_frame_op(self, right, op, axis: int, fill_value, level) -> b return False def _align_for_op( - self, other, axis, flex: bool | None = False, level: Level = None + self, other, axis: AxisInt, flex: bool | None = False, level: Level = None ): """ Convert rhs to meet lhs dims if input is list, tuple or np.ndarray. @@ -7676,7 +7614,7 @@ def _align_for_op( ---------- left : DataFrame right : Any - axis : int, str, or None + axis : int flex : bool or None, default False Whether this is a flex op, in which case we reindex. None indicates not to check for alignment. 
@@ -7703,7 +7641,7 @@ def to_series(right): # datetime64[h] ndarray dtype = object - if axis is not None and left._get_axis_number(axis) == 0: + if axis == 0: if len(left.index) != len(right): raise ValueError( msg.format(req_len=len(left.index), given_len=len(right)) @@ -7780,8 +7718,6 @@ def to_series(right): ) elif isinstance(right, Series): # axis=1 is default for DataFrame-with-Series op - axis = left._get_axis_number(axis) if axis is not None else 1 - if not flex: if not left.axes[axis].equals(right.index): raise ValueError( diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 72fd7fadd0987..02a2a202939c0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -242,10 +242,7 @@ class NDFrame(PandasObject, indexing.IndexingMixin): "_item_cache", "_cache", "_is_copy", - "_subtyp", "_name", - "_default_kind", - "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", @@ -281,6 +278,7 @@ def __init__( object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) + @final @classmethod def _init_mgr( cls, @@ -622,6 +620,7 @@ def axes(self) -> list[Index]: # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] + @final @property def ndim(self) -> int: """ @@ -645,6 +644,7 @@ def ndim(self) -> int: """ return self._mgr.ndim + @final @property def size(self) -> int: """ @@ -4673,7 +4673,7 @@ def _drop_axis( ) result = self._constructor(new_mgr) if self.ndim == 1: - result.name = self.name + result._name = self.name return result.__finalize__(self) @@ -5397,8 +5397,16 @@ def reindex( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") + @final def _reindex_axes( - self, axes, level, limit, tolerance, method, fill_value, copy + self, + axes, + level: Level | None, + limit: int | None, + tolerance, + method, + fill_value: Scalar | None, + copy: bool_t | None, ) -> Self: """Perform the 
reindex for all the axes.""" obj = self @@ -5424,7 +5432,7 @@ def _reindex_axes( return obj - def _needs_reindex_multi(self, axes, method, level) -> bool_t: + def _needs_reindex_multi(self, axes, method, level: Level | None) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) @@ -9527,6 +9535,7 @@ def align( if broadcast_axis is not lib.no_default: # GH#51856 + # TODO(3.0): enforcing this deprecation will close GH#13194 msg = ( f"The 'broadcast_axis' keyword in {type(self).__name__}.align is " "deprecated and will be removed in a future version." diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index dad188e2d9304..6f361ff867c35 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -234,7 +234,7 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: return values # TODO: general case implementation overridable by EAs. - def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False): + def _disallow_invalid_ops(self, dtype: DtypeObj): """ Check if we can do this operation with our cython functions. @@ -247,7 +247,7 @@ def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False): """ how = self.how - if is_numeric: + if is_numeric_dtype(dtype): # never an invalid op for those dtypes, so return early as fastpath return @@ -711,12 +711,9 @@ def cython_operation( # as we can have 1D ExtensionArrays that we need to treat as 2D assert axis == 0 - dtype = values.dtype - is_numeric = is_numeric_dtype(dtype) - # can we do this operation with our cython functions # if not raise NotImplementedError - self._disallow_invalid_ops(dtype, is_numeric) + self._disallow_invalid_ops(values.dtype) if not isinstance(values, np.ndarray): # i.e. 
ExtensionArray diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index eb79278eb35d9..81beddac7c432 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -936,7 +936,7 @@ def dtype(self) -> DtypeObj: return self._data.dtype @final - def ravel(self, order: str_t = "C") -> Index: + def ravel(self, order: str_t = "C") -> Self: """ Return a view on self. diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 1133ea6be26ac..eacf979dde3fc 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -280,7 +280,7 @@ def _partial_date_slice( self, reso: Resolution, parsed: datetime, - ): + ) -> slice | npt.NDArray[np.intp]: """ Parameters ---------- @@ -488,10 +488,10 @@ def _as_range_index(self) -> RangeIndex: rng = range(self[0]._value, self[-1]._value + tick, tick) return RangeIndex(rng) - def _can_range_setop(self, other): + def _can_range_setop(self, other) -> bool: return isinstance(self.freq, Tick) and isinstance(other.freq, Tick) - def _wrap_range_setop(self, other, res_i8): + def _wrap_range_setop(self, other, res_i8) -> Self: new_freq = None if not len(res_i8): # RangeIndex defaults to step=1, which we don't want. @@ -508,16 +508,16 @@ def _wrap_range_setop(self, other, res_i8): result = type(self._data)._simple_new( res_values, dtype=self.dtype, freq=new_freq ) - return self._wrap_setop_result(other, result) + return cast("Self", self._wrap_setop_result(other, result)) - def _range_intersect(self, other, sort): + def _range_intersect(self, other, sort) -> Self: # Dispatch to RangeIndex intersection logic. left = self._as_range_index right = other._as_range_index res_i8 = left.intersection(right, sort=sort) return self._wrap_range_setop(other, res_i8) - def _range_union(self, other, sort): + def _range_union(self, other, sort) -> Self: # Dispatch to RangeIndex union logic. 
left = self._as_range_index right = other._as_range_index @@ -747,7 +747,7 @@ def _get_insert_freq(self, loc: int, item): return freq @doc(NDArrayBackedExtensionIndex.delete) - def delete(self, loc) -> DatetimeTimedeltaMixin: + def delete(self, loc) -> Self: result = super().delete(loc) result._data._freq = self._get_delete_freq(loc) return result @@ -771,7 +771,7 @@ def take( allow_fill: bool = True, fill_value=None, **kwargs, - ): + ) -> Self: nv.validate_take((), kwargs) indices = np.asarray(indices, dtype=np.intp) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9690806afb173..991195cafccfc 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -57,6 +57,7 @@ DtypeObj, Frequency, IntervalClosedType, + Self, TimeAmbiguous, TimeNonexistent, npt, @@ -266,7 +267,7 @@ def strftime(self, date_format) -> Index: return Index(arr, name=self.name, dtype=object) @doc(DatetimeArray.tz_convert) - def tz_convert(self, tz) -> DatetimeIndex: + def tz_convert(self, tz) -> Self: arr = self._data.tz_convert(tz) return type(self)._simple_new(arr, name=self.name, refs=self._references) @@ -276,7 +277,7 @@ def tz_localize( tz, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", - ) -> DatetimeIndex: + ) -> Self: arr = self._data.tz_localize(tz, ambiguous, nonexistent) return type(self)._simple_new(arr, name=self.name) @@ -317,7 +318,7 @@ def __new__( dtype: Dtype | None = None, copy: bool = False, name: Hashable = None, - ) -> DatetimeIndex: + ) -> Self: if is_scalar(data): cls._raise_scalar_data_error(data) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index eae70d50e7f95..2fa2b7f54639d 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -49,6 +49,7 @@ from pandas._typing import ( Dtype, DtypeObj, + Self, npt, ) _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -175,7 +176,7 @@ def _resolution_obj(self) -> Resolution: 
other_name="PeriodArray", **_shared_doc_kwargs, ) - def asfreq(self, freq=None, how: str = "E") -> PeriodIndex: + def asfreq(self, freq=None, how: str = "E") -> Self: arr = self._data.asfreq(freq, how) return type(self)._simple_new(arr, name=self.name) @@ -211,7 +212,7 @@ def __new__( copy: bool = False, name: Hashable = None, **fields, - ) -> PeriodIndex: + ) -> Self: valid_field_set = { "year", "month", @@ -272,7 +273,7 @@ def __new__( # Data @property - def values(self) -> np.ndarray: + def values(self) -> npt.NDArray[np.object_]: return np.asarray(self, dtype=object) def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]: @@ -476,7 +477,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end")) @doc(DatetimeIndexOpsMixin.shift) - def shift(self, periods: int = 1, freq=None): + def shift(self, periods: int = 1, freq=None) -> Self: if freq is not None: raise TypeError( f"`freq` argument is not supported for {type(self).__name__}.shift" diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 66c5a12549f23..9b3309706e12b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -51,6 +51,7 @@ if TYPE_CHECKING: from pandas._typing import ( Dtype, + Self, npt, ) _empty_range = range(0) @@ -144,9 +145,7 @@ def __new__( return cls._simple_new(rng, name=name) @classmethod - def from_range( - cls, data: range, name=None, dtype: Dtype | None = None - ) -> RangeIndex: + def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self: """ Create RangeIndex from a range object. 
@@ -168,7 +167,7 @@ def from_range( @classmethod def _simple_new( # type: ignore[override] cls, values: range, name: Hashable = None - ) -> RangeIndex: + ) -> Self: result = object.__new__(cls) assert isinstance(values, range) @@ -216,7 +215,7 @@ def _get_data_as_items(self): return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)] def __reduce__(self): - d = {"name": self.name} + d = {"name": self._name} d.update(dict(self._get_data_as_items())) return ibase._new_Index, (type(self), d), None @@ -228,8 +227,8 @@ def _format_attrs(self): Return a list of tuples of the (attr, formatted_value) """ attrs = self._get_data_as_items() - if self.name is not None: - attrs.append(("name", ibase.default_pprint(self.name))) + if self._name is not None: + attrs.append(("name", ibase.default_pprint(self._name))) return attrs def _format_data(self, name=None): @@ -398,7 +397,7 @@ def __iter__(self) -> Iterator[int]: @doc(Index._shallow_copy) def _shallow_copy(self, values, name: Hashable = no_default): - name = self.name if name is no_default else name + name = self._name if name is no_default else name if values.dtype.kind == "f": return Index(values, name=name, dtype=np.float64) @@ -412,13 +411,13 @@ def _shallow_copy(self, values, name: Hashable = no_default): else: return self._constructor._simple_new(values, name=name) - def _view(self: RangeIndex) -> RangeIndex: + def _view(self) -> Self: result = type(self)._simple_new(self._range, name=self._name) result._cache = self._cache return result @doc(Index.copy) - def copy(self, name: Hashable = None, deep: bool = False): + def copy(self, name: Hashable = None, deep: bool = False) -> Self: name = self._validate_names(name=name, deep=deep)[0] new_index = self._rename(name=name) return new_index @@ -814,17 +813,17 @@ def insert(self, loc: int, item) -> Index: rng = self._range if loc == 0 and item == self[0] - self.step: new_rng = range(rng.start - rng.step, rng.stop, rng.step) - return type(self)._simple_new(new_rng, 
name=self.name) + return type(self)._simple_new(new_rng, name=self._name) elif loc == len(self) and item == self[-1] + self.step: new_rng = range(rng.start, rng.stop + rng.step, rng.step) - return type(self)._simple_new(new_rng, name=self.name) + return type(self)._simple_new(new_rng, name=self._name) elif len(self) == 2 and item == self[0] + self.step / 2: # e.g. inserting 1 into [0, 2] step = int(self.step / 2) new_rng = range(self.start, self.stop, step) - return type(self)._simple_new(new_rng, name=self.name) + return type(self)._simple_new(new_rng, name=self._name) return super().insert(loc, item) @@ -922,7 +921,7 @@ def __getitem__(self, key): ) return super().__getitem__(key) - def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex: + def _getitem_slice(self, slobj: slice) -> Self: """ Fastpath for __getitem__ when we know we have a slice. """ @@ -937,11 +936,11 @@ def __floordiv__(self, other): step = self.step // other stop = start + len(self) * step new_range = range(start, stop, step or 1) - return self._simple_new(new_range, name=self.name) + return self._simple_new(new_range, name=self._name) if len(self) == 1: start = self.start // other new_range = range(start, start + 1, 1) - return self._simple_new(new_range, name=self.name) + return self._simple_new(new_range, name=self._name) return super().__floordiv__(other) diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 407e16e1fa187..d72266346e654 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -167,7 +167,7 @@ def set_axis(self, axis: AxisInt, new_labels: Index) -> None: axis = self._normalize_axis(axis) self._axes[axis] = new_labels - def get_dtypes(self) -> np.ndarray: + def get_dtypes(self) -> npt.NDArray[np.object_]: return np.array([arr.dtype for arr in self.arrays], dtype="object") def add_references(self, mgr: BaseArrayManager) -> None: diff --git a/pandas/core/internals/blocks.py 
b/pandas/core/internals/blocks.py index f48b044ff0016..6dcb73f6793ad 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2173,7 +2173,7 @@ def shift( def _catch_deprecated_value_error(err: Exception) -> None: """ We catch ValueError for now, but only a specific one raised by DatetimeArray - which will no longer be raised in version.2.0. + which will no longer be raised in version 2.0. """ if isinstance(err, ValueError): if isinstance(err, IncompatibleFrequency): @@ -2431,8 +2431,8 @@ def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: def extract_pandas_array( - values: np.ndarray | ExtensionArray, dtype: DtypeObj | None, ndim: int -) -> tuple[np.ndarray | ExtensionArray, DtypeObj | None]: + values: ArrayLike, dtype: DtypeObj | None, ndim: int +) -> tuple[ArrayLike, DtypeObj | None]: """ Ensure that we don't allow PandasArray / PandasDtype in internals. """ @@ -2492,7 +2492,7 @@ def to_native_types( float_format=None, decimal: str = ".", **kwargs, -) -> np.ndarray: +) -> npt.NDArray[np.object_]: """convert to our native types format""" if isinstance(values, Categorical) and values.categories.dtype.kind in "Mm": # GH#40754 Convert categorical datetimes to datetime array diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index cb644c8329179..ed1a9b193b3e4 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -273,8 +273,8 @@ def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool: ref = weakref.ref(self.blocks[blkno]) return ref in mgr.blocks[blkno].refs.referenced_blocks - def get_dtypes(self): - dtypes = np.array([blk.dtype for blk in self.blocks]) + def get_dtypes(self) -> npt.NDArray[np.object_]: + dtypes = np.array([blk.dtype for blk in self.blocks], dtype=object) return dtypes.take(self.blknos) @property @@ -2010,8 +2010,8 @@ def index(self) -> Index: def dtype(self) -> DtypeObj: return self._block.dtype - def 
get_dtypes(self) -> np.ndarray: - return np.array([self._block.dtype]) + def get_dtypes(self) -> npt.NDArray[np.object_]: + return np.array([self._block.dtype], dtype=object) def external_values(self): """The array that Series.values returns""" diff --git a/pandas/core/methods/describe.py b/pandas/core/methods/describe.py index 2fa059178d238..e9f1eaabbe246 100644 --- a/pandas/core/methods/describe.py +++ b/pandas/core/methods/describe.py @@ -11,7 +11,6 @@ ) from typing import ( TYPE_CHECKING, - Any, Callable, Hashable, Sequence, @@ -77,7 +76,7 @@ def describe_ndframe( ------- Dataframe or series description. """ - percentiles = refine_percentiles(percentiles) + percentiles = _refine_percentiles(percentiles) describer: NDFrameDescriberAbstract if obj.ndim == 1: @@ -175,7 +174,7 @@ def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame: d.columns = data.columns.copy() return d - def _select_data(self): + def _select_data(self) -> DataFrame: """Select columns to be described.""" if (self.include is None) and (self.exclude is None): # when some numerics are found, keep only numerics @@ -193,7 +192,7 @@ def _select_data(self): include=self.include, exclude=self.exclude, ) - return data + return data # pyright: ignore def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]: @@ -229,9 +228,9 @@ def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series: ) # GH#48340 - always return float on non-complex numeric data dtype: DtypeObj | None - if is_extension_array_dtype(series): + if is_extension_array_dtype(series.dtype): dtype = Float64Dtype() - elif is_numeric_dtype(series) and not is_complex_dtype(series): + elif is_numeric_dtype(series.dtype) and not is_complex_dtype(series.dtype): dtype = np.dtype("float") else: dtype = None @@ -364,9 +363,9 @@ def select_describe_func( return describe_categorical_1d -def refine_percentiles( +def _refine_percentiles( percentiles: Sequence[float] | np.ndarray | None, -) -> np.ndarray[Any, 
np.dtype[np.float64]]: +) -> npt.NDArray[np.float64]: """ Ensure that percentiles are unique and sorted. diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index eaeef4d00e23e..33019269aa3b7 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -420,7 +420,7 @@ def test_maybe_promote_timedelta64_with_any(timedelta64_dtype, any_numpy_dtype): [pd.Timedelta(days=1), np.timedelta64(24, "h"), datetime.timedelta(1)], ids=["pd.Timedelta", "np.timedelta64", "datetime.timedelta"], ) -def test_maybe_promote_any_with_timedelta64(any_numpy_dtype, fill_value, request): +def test_maybe_promote_any_with_timedelta64(any_numpy_dtype, fill_value): dtype = np.dtype(any_numpy_dtype) # filling anything but timedelta with timedelta casts to object diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 40c8e4fa27f90..090b3d64e7c41 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1803,12 +1803,12 @@ def test_alignment_non_pandas(self, val): align = DataFrame._align_for_op expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index) - tm.assert_frame_equal(align(df, val, "index")[1], expected) + tm.assert_frame_equal(align(df, val, axis=0)[1], expected) expected = DataFrame( {"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index ) - tm.assert_frame_equal(align(df, val, "columns")[1], expected) + tm.assert_frame_equal(align(df, val, axis=1)[1], expected) @pytest.mark.parametrize("val", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]) def test_alignment_non_pandas_length_mismatch(self, val): @@ -1820,10 +1820,10 @@ def test_alignment_non_pandas_length_mismatch(self, val): # length mismatch msg = "Unable to coerce to Series, length must be 3: given 2" with pytest.raises(ValueError, match=msg): - align(df, val, "index") + align(df, val, axis=0) with pytest.raises(ValueError, match=msg): - 
align(df, val, "columns") + align(df, val, axis=1) def test_alignment_non_pandas_index_columns(self): index = ["A", "B", "C"] @@ -1833,11 +1833,11 @@ def test_alignment_non_pandas_index_columns(self): align = DataFrame._align_for_op val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) tm.assert_frame_equal( - align(df, val, "index")[1], + align(df, val, axis=0)[1], DataFrame(val, index=df.index, columns=df.columns), ) tm.assert_frame_equal( - align(df, val, "columns")[1], + align(df, val, axis=1)[1], DataFrame(val, index=df.index, columns=df.columns), ) @@ -1845,19 +1845,19 @@ def test_alignment_non_pandas_index_columns(self): msg = "Unable to coerce to DataFrame, shape must be" val = np.array([[1, 2, 3], [4, 5, 6]]) with pytest.raises(ValueError, match=msg): - align(df, val, "index") + align(df, val, axis=0) with pytest.raises(ValueError, match=msg): - align(df, val, "columns") + align(df, val, axis=1) val = np.zeros((3, 3, 3)) msg = re.escape( "Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)" ) with pytest.raises(ValueError, match=msg): - align(df, val, "index") + align(df, val, axis=0) with pytest.raises(ValueError, match=msg): - align(df, val, "columns") + align(df, val, axis=1) def test_no_warning(self, all_arithmetic_operators): df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index aae51ebc5a017..71ce8541de24b 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -438,7 +438,7 @@ def test_astype_ea_to_datetimetzdtype(self, dtype): tm.assert_series_equal(result, expected) - def test_astype_retain_Attrs(self, any_numpy_dtype): + def test_astype_retain_attrs(self, any_numpy_dtype): # GH#44414 ser = Series([0, 1, 2, 3]) ser.attrs["Location"] = "Michigan" diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py index 
2c427399c9cd5..fcadb07a13b83 100644 --- a/pandas/tests/series/methods/test_reindex.py +++ b/pandas/tests/series/methods/test_reindex.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import ( NA, Categorical, @@ -300,13 +302,11 @@ def test_reindex_fill_value(): tm.assert_series_equal(result, expected) +@td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) @pytest.mark.parametrize("fill_value", ["string", 0, Timedelta(0)]) def test_reindex_fill_value_datetimelike_upcast(dtype, fill_value, using_array_manager): # https://github.com/pandas-dev/pandas/issues/42921 - if using_array_manager: - pytest.skip("Array manager does not promote dtype, hence we fail") - if dtype == "timedelta64[ns]" and fill_value == Timedelta(0): # use the scalar that is not compatible with the dtype for this test fill_value = Timestamp(0) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index ae5543ff266ef..58bdf3666caf4 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1306,7 +1306,7 @@ def test_categorical_zeroes(self): ) tm.assert_series_equal(result, expected, check_index_type=True) - def test_dropna(self): + def test_value_counts_dropna(self): # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328 tm.assert_series_equal( diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 5962d52edae3e..7d3aaf7fd3744 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1085,7 +1085,7 @@ def test_to_datetime_array_of_dt64s(self, cache, unit): # A list of datetimes where the last one is out of bounds dts_with_oob = dts + [np.datetime64("9999-01-01")] - # As of GH#?? 
we do not raise in this case + # As of GH#51978 we do not raise in this case to_datetime(dts_with_oob, errors="raise") result = to_datetime(dts_with_oob, errors="coerce", cache=cache)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52248
2023-03-27T21:27:05Z
2023-03-29T21:31:27Z
2023-03-29T21:31:27Z
2023-03-29T21:32:13Z
DOC: Add .map to ExtensionArray reference
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 45df480779ee7..2d307859eb7a1 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -524,6 +524,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then pandas.api.extensions.ExtensionArray.insert \ pandas.api.extensions.ExtensionArray.isin \ pandas.api.extensions.ExtensionArray.isna \ + pandas.api.extensions.ExtensionArray.map \ pandas.api.extensions.ExtensionArray.ravel \ pandas.api.extensions.ExtensionArray.searchsorted \ pandas.api.extensions.ExtensionArray.shift \ diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index b33efd388bd60..f25f0f5bc21fa 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -53,6 +53,7 @@ objects. api.extensions.ExtensionArray.insert api.extensions.ExtensionArray.isin api.extensions.ExtensionArray.isna + api.extensions.ExtensionArray.map api.extensions.ExtensionArray.ravel api.extensions.ExtensionArray.repeat api.extensions.ExtensionArray.searchsorted diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index a5032c590300c..d583a68ab4e0b 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1719,6 +1719,12 @@ def map(self, mapper, na_action=None): The output of the mapping function applied to the array. If the function returns a tuple with more than one element a MultiIndex will be returned. + + Examples + -------- + >>> ext_arr = pd.array([1, 2, 3]) + >>> ext_arr.map(str) + array(['1', '2', '3'], dtype=object) """ return map_array(self, mapper, na_action=na_action)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/52247
2023-03-27T20:37:40Z
2023-03-28T18:31:05Z
2023-03-28T18:31:05Z
2023-03-31T13:44:13Z
ENH: semi joins
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 1f8c93978c890..56ffb97b11500 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -38,6 +38,7 @@ Other enhancements - Improved error message when creating a DataFrame with empty data (0 rows), no index and an incorrect number of columns. (:issue:`52084`) - :meth:`DataFrame.applymap` now uses the :meth:`~api.extensions.ExtensionArray.map` method of underlying :class:`api.extensions.ExtensionArray` instances (:issue:`52219`) - :meth:`arrays.SparseArray.map` now supports ``na_action`` (:issue:`52096`). +- :meth:`DataFrame.merge` now supports ``how`` with ``leftsemi`` and ``rightsemi`` (:issue:`42784`) .. --------------------------------------------------------------------------- .. _whatsnew_210.notable_bug_fixes: diff --git a/pandas/_typing.py b/pandas/_typing.py index de02a549856ab..8f02bc8e73972 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -371,7 +371,7 @@ def closed(self) -> bool: AnyAll = Literal["any", "all"] # merge -MergeHow = Literal["left", "right", "inner", "outer", "cross"] +MergeHow = Literal["left", "right", "inner", "outer", "cross", "leftsemi", "rightsemi"] # join JoinHow = Literal["left", "right", "inner", "outer"] diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index c3dacc2172aa7..dc49b72b62bba 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -800,6 +800,30 @@ def get_result(self, copy: bool | None = True) -> DataFrame: if self.indicator: self.left, self.right = self._indicator_pre_merge(self.left, self.right) + if self.how in ["leftsemi", "rightsemi"]: + _leftdf = None + if self.how == "leftsemi": + _leftdf = self.left + if self.left_index and self.right_index: + _left = self.left.index + _right = self.right.index + elif self.on is not None or ( + None not in self.left_on and None not in self.right_on + ): + _left = self.left[self.left_on] + _right = 
self.right[self.right_on] + elif self.left_index and self.right_on is not None: + _left = self.left.index + _right = self.right[self.right_on] + elif self.right_index and self.left_on is not None: + _left = self.left[self.left_on] + _right = self.right.index + return ( + _semi_helper(_leftdf, _left, _right) + if _leftdf is not None + else _semi_helper(self.right, _right, _left) + ) + join_index, left_indexer, right_indexer = self._get_join_info() result = self._reindex_and_concat( @@ -1030,6 +1054,8 @@ def _get_join_info( ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # make mypy happy assert self.how != "cross" + assert self.how != "leftsemi" + assert self.how != "rightsemi" left_ax = self.left.axes[self.axis] right_ax = self.right.axes[self.axis] @@ -1253,11 +1279,12 @@ def _get_merge_keys( else: left_keys = [self.left.index._values] - if left_drop: - self.left = self.left._drop_labels_or_levels(left_drop) + if self.how not in ["leftsemi", "rightsemi"]: + if left_drop: + self.left = self.left._drop_labels_or_levels(left_drop) - if right_drop: - self.right = self.right._drop_labels_or_levels(right_drop) + if right_drop: + self.right = self.right._drop_labels_or_levels(right_drop) return left_keys, right_keys, join_names @@ -1596,6 +1623,25 @@ def _validate(self, validate: str) -> None: ) +def _semi_helper( + leftdf: DataFrame, + left: Index | DataFrame, + right: Index | DataFrame, +) -> DataFrame: + if not isinstance(left, Index): + if len(left.columns) == 1: + left = Index(left.values.flatten()) + else: + left = MultiIndex.from_frame(left) + if not isinstance(right, Index): + if len(right.columns) == 1: + right = Index(right.values.flatten()) + else: + right = MultiIndex.from_frame(right) + subset = left.isin(right) + return leftdf.loc[subset] + + def get_join_indexers( left_keys, right_keys, diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 6f2b327c37067..f9f4462db6f80 100644 --- 
a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2739,6 +2739,76 @@ def test_merge_different_index_names(): tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize( + "data, index, how, left_index, right_index", + [ + ( + { + "a": [1, 1, 2, 3], + "b": [ + 1, + 2, + 3, + 4, + ], + }, + [1, 1, 2, 3], + "leftsemi", + False, + False, + ), + ( + {"a": [1, 2, 3, 3], "c": [5, 6, 7, 8]}, + [1, 2, 3, 4], + "rightsemi", + False, + False, + ), + ( + {"a": [1, 1, 2, 3, 4], "b": [1, 2, 3, 4, 5]}, + [1, 1, 2, 3, 4], + "leftsemi", + True, + True, + ), + ( + {"a": [1, 2, 3, 3], "c": [5, 6, 7, 8]}, + [1, 2, 3, 4], + "rightsemi", + False, + False, + ), + ], +) +def test_merge_semi(how, data, index, left_index, right_index): + # GH 42784 + left = DataFrame( + {"a": [1, 1, 2, 3, 4], "b": [1, 2, 3, 4, 5]}, index=[1, 1, 2, 3, 4] + ) + right = DataFrame({"a": [0, 1, 2, 3, 3], "c": [4, 5, 6, 7, 8]}) + result = left.merge(right, how=how, left_index=left_index, right_index=right_index) + expected = DataFrame(data, index=index) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "expected, how", + [ + (DataFrame({"a": [1, 2], "b": [2, 3], "c": [2, 3]}, index=[1, 2]), "leftsemi"), + (DataFrame({"a": [1, 2], "c": [2, 3]}, index=[1, 2]), "rightsemi"), + ], +) +def test_merge_semi_multicol(expected, how): + # GH 42784 + left = DataFrame( + {"a": [1, 1, 2, 3, 4], "b": [1, 2, 3, 4, 5], "c": [1, 2, 3, 4, 5]}, + index=[1, 1, 2, 3, 4], + ) + right = DataFrame({"a": [0, 1, 2, 3, 3], "c": [1, 2, 3, 2, 1]}) + result = left.merge(right, how=how) + tm.assert_frame_equal(result, expected) + + def test_merge_ea(any_numeric_ea_dtype, join_type): # GH#44240 left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype)
- [x] closes #42784 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.1.0.rst` file if fixing a bug or adding a new feature. This continues prior pull request #49661 which is currently closed. I merged the changes from main into the development branch and moved the line added to the 2.0 release notes to the 2.1 release notes.
https://api.github.com/repos/pandas-dev/pandas/pulls/52243
2023-03-27T18:11:42Z
2023-04-03T19:01:41Z
null
2023-04-10T08:52:25Z
Backport PR #52195 on branch 2.0.x (WARN: Only warn about inconsistent parsing if there are multiple non-null elements)
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 0265b4404d6ab..3cd3dec185ccf 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -139,13 +139,16 @@ def _guess_datetime_format_for_array(arr, dayfirst: bool | None = False) -> str ) if guessed_format is not None: return guessed_format - warnings.warn( - "Could not infer format, so each element will be parsed " - "individually, falling back to `dateutil`. To ensure parsing is " - "consistent and as-expected, please specify a format.", - UserWarning, - stacklevel=find_stack_level(), - ) + # If there are multiple non-null elements, warn about + # how parsing might not be consistent + if tslib.first_non_null(arr[first_non_null + 1 :]) != -1: + warnings.warn( + "Could not infer format, so each element will be parsed " + "individually, falling back to `dateutil`. To ensure parsing is " + "consistent and as-expected, please specify a format.", + UserWarning, + stacklevel=find_stack_level(), + ) return None diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index f3c49471b5bb2..8c3474220cde8 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -1252,13 +1252,15 @@ def test_bad_date_parse(all_parsers, cache_dates, value): parser = all_parsers s = StringIO((f"{value},\n") * 50000) - if parser.engine == "pyarrow": + if parser.engine == "pyarrow" and not cache_dates: # None in input gets converted to 'None', for which # pandas tries to guess the datetime format, triggering # the warning. TODO: parse dates directly in pyarrow, see # https://github.com/pandas-dev/pandas/issues/48017 warn = UserWarning else: + # Note: warning is not raised if 'cache_dates', because here there is only a + # single unique date and hence no risk of inconsistent parsing. 
warn = None parser.read_csv_check_warnings( warn, @@ -1285,6 +1287,10 @@ def test_bad_date_parse_with_warning(all_parsers, cache_dates, value): # TODO: parse dates directly in pyarrow, see # https://github.com/pandas-dev/pandas/issues/48017 warn = None + elif cache_dates: + # Note: warning is not raised if 'cache_dates', because here there is only a + # single unique date and hence no risk of inconsistent parsing. + warn = None else: warn = UserWarning parser.read_csv_check_warnings( @@ -1737,9 +1743,7 @@ def test_parse_timezone(all_parsers): def test_invalid_parse_delimited_date(all_parsers, date_string): parser = all_parsers expected = DataFrame({0: [date_string]}, dtype="object") - result = parser.read_csv_check_warnings( - UserWarning, - "Could not infer format", + result = parser.read_csv( StringIO(date_string), header=None, parse_dates=[0], @@ -2063,9 +2067,7 @@ def test_infer_first_column_as_index(all_parsers): # GH#11019 parser = all_parsers data = "a,b,c\n1970-01-01,2,3,4" - result = parser.read_csv_check_warnings( - UserWarning, - "Could not infer format", + result = parser.read_csv( StringIO(data), parse_dates=["a"], ) diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py index 4823df1da9959..f818d621c744f 100644 --- a/pandas/tests/io/parser/usecols/test_parse_dates.py +++ b/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -124,9 +124,7 @@ def test_usecols_with_parse_dates4(all_parsers): } expected = DataFrame(cols, columns=["a_b"] + list("cdefghij")) - result = parser.read_csv_check_warnings( - UserWarning, - "Could not infer format", + result = parser.read_csv( StringIO(data), usecols=usecols, parse_dates=parse_dates, diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 07529fcbb49b7..ae5543ff266ef 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1231,8 +1231,7 @@ def test_value_counts_datetime_outofbounds(self): 
tm.assert_series_equal(res, exp) # GH 12424 - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore") + res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore") exp = Series(["2362-01-01", np.nan], dtype=object) tm.assert_series_equal(res, exp) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 384190404e449..7a48447fffe6c 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -271,8 +271,7 @@ def test_to_datetime_with_NA(self, data, format, expected): def test_to_datetime_with_NA_with_warning(self): # GH#42957 - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result = to_datetime(["201010", pd.NA]) + result = to_datetime(["201010", pd.NA]) expected = DatetimeIndex(["2010-10-20", "NaT"]) tm.assert_index_equal(result, expected) @@ -946,8 +945,7 @@ def test_to_datetime_YYYYMMDD(self): def test_to_datetime_unparsable_ignore(self): # unparsable ser = "Month 1, 1999" - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - assert to_datetime(ser, errors="ignore") == ser + assert to_datetime(ser, errors="ignore") == ser @td.skip_if_windows # `tm.set_timezone` does not work in windows def test_to_datetime_now(self): @@ -1344,17 +1342,13 @@ def test_invalid_format_raises(self, errors): to_datetime(["00:00:00"], format="H%:M%:S%", errors=errors) @pytest.mark.parametrize("value", ["a", "00:01:99"]) - @pytest.mark.parametrize( - "format,warning", [(None, UserWarning), ("%H:%M:%S", None)] - ) - def test_datetime_invalid_scalar(self, value, format, warning): + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_invalid_scalar(self, value, format): # GH24763 - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="ignore", format=format) + res = 
to_datetime(value, errors="ignore", format=format) assert res == value - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="coerce", format=format) + res = to_datetime(value, errors="coerce", format=format) assert res is NaT msg = "|".join( @@ -1368,21 +1362,16 @@ def test_datetime_invalid_scalar(self, value, format, warning): ] ) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(warning, match="Could not infer format"): - to_datetime(value, errors="raise", format=format) + to_datetime(value, errors="raise", format=format) @pytest.mark.parametrize("value", ["3000/12/11 00:00:00"]) - @pytest.mark.parametrize( - "format,warning", [(None, UserWarning), ("%H:%M:%S", None)] - ) - def test_datetime_outofbounds_scalar(self, value, format, warning): + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_outofbounds_scalar(self, value, format): # GH24763 - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="ignore", format=format) + res = to_datetime(value, errors="ignore", format=format) assert res == value - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="coerce", format=format) + res = to_datetime(value, errors="coerce", format=format) assert res is NaT if format is not None: @@ -1391,22 +1380,26 @@ def test_datetime_outofbounds_scalar(self, value, format, warning): to_datetime(value, errors="raise", format=format) else: msg = "^Out of bounds .*, at position 0$" - with pytest.raises( - OutOfBoundsDatetime, match=msg - ), tm.assert_produces_warning(warning, match="Could not infer format"): + with pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(value, errors="raise", format=format) - @pytest.mark.parametrize("values", [["a"], ["00:01:99"], ["a", "b", "99:00:00"]]) @pytest.mark.parametrize( - "format,warning", [(None, UserWarning), 
("%H:%M:%S", None)] + ("values"), [(["a"]), (["00:01:99"]), (["a", "b", "99:00:00"])] ) - def test_datetime_invalid_index(self, values, format, warning): + @pytest.mark.parametrize("format", [(None), ("%H:%M:%S")]) + def test_datetime_invalid_index(self, values, format): # GH24763 - with tm.assert_produces_warning(warning, match="Could not infer format"): + # Not great to have logic in tests, but this one's hard to + # parametrise over + if format is None and len(values) > 1: + warn = UserWarning + else: + warn = None + with tm.assert_produces_warning(warn, match="Could not infer format"): res = to_datetime(values, errors="ignore", format=format) tm.assert_index_equal(res, Index(values)) - with tm.assert_produces_warning(warning, match="Could not infer format"): + with tm.assert_produces_warning(warn, match="Could not infer format"): res = to_datetime(values, errors="coerce", format=format) tm.assert_index_equal(res, DatetimeIndex([NaT] * len(values))) @@ -1421,7 +1414,7 @@ def test_datetime_invalid_index(self, values, format, warning): ] ) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(warning, match="Could not infer format"): + with tm.assert_produces_warning(warn, match="Could not infer format"): to_datetime(values, errors="raise", format=format) @pytest.mark.parametrize("utc", [True, None]) @@ -2220,10 +2213,7 @@ def test_to_datetime_barely_out_of_bounds(self): msg = "^Out of bounds nanosecond timestamp: .*, at position 0" with pytest.raises(OutOfBoundsDatetime, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(arr) + to_datetime(arr) @pytest.mark.parametrize( "arg, exp_str", @@ -2537,10 +2527,7 @@ def test_string_invalid_operation(self, cache): # GH #51084 with pytest.raises(ValueError, match="Unknown datetime string format"): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(invalid, errors="raise", cache=cache) + 
to_datetime(invalid, errors="raise", cache=cache) def test_string_na_nat_conversion(self, cache): # GH #999, #858 @@ -2567,22 +2554,15 @@ def test_string_na_nat_conversion_malformed(self, cache): # GH 10636, default is now 'raise' msg = r"Unknown datetime string format" with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(malformed, errors="raise", cache=cache) + to_datetime(malformed, errors="raise", cache=cache) - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result = to_datetime(malformed, errors="ignore", cache=cache) + result = to_datetime(malformed, errors="ignore", cache=cache) # GH 21864 expected = Index(malformed) tm.assert_index_equal(result, expected) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(malformed, errors="raise", cache=cache) + to_datetime(malformed, errors="raise", cache=cache) def test_string_na_nat_conversion_with_name(self, cache): idx = ["a", "b", "c", "d", "e"] @@ -2811,14 +2791,13 @@ def test_to_datetime_series_start_with_nans(self, cache): tm.assert_series_equal(result, expected) @pytest.mark.parametrize( - "tz_name, offset, warning", - [("UTC", 0, None), ("UTC-3", 180, UserWarning), ("UTC+3", -180, UserWarning)], + "tz_name, offset", + [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)], ) - def test_infer_datetime_format_tz_name(self, tz_name, offset, warning): + def test_infer_datetime_format_tz_name(self, tz_name, offset): # GH 33133 ser = Series([f"2019-02-02 08:07:13 {tz_name}"]) - with tm.assert_produces_warning(warning, match="Could not infer format"): - result = to_datetime(ser) + result = to_datetime(ser) tz = timezone(timedelta(minutes=offset)) expected = Series([Timestamp("2019-02-02 08:07:13").tz_localize(tz)]) tm.assert_series_equal(result, expected) @@ -2866,25 +2845,21 @@ class TestDaysInMonth: # tests for 
issue #10154 @pytest.mark.parametrize( - "arg, format, warning", + "arg, format", [ - ["2015-02-29", None, UserWarning], - ["2015-02-29", "%Y-%m-%d", None], - ["2015-02-32", "%Y-%m-%d", None], - ["2015-04-31", "%Y-%m-%d", None], + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-32", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], ], ) - def test_day_not_in_month_coerce(self, cache, arg, format, warning): - with tm.assert_produces_warning(warning, match="Could not infer format"): - assert isna(to_datetime(arg, errors="coerce", format=format, cache=cache)) + def test_day_not_in_month_coerce(self, cache, arg, format): + assert isna(to_datetime(arg, errors="coerce", format=format, cache=cache)) def test_day_not_in_month_raise(self, cache): msg = "day is out of range for month: 2015-02-29, at position 0" with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime("2015-02-29", errors="raise", cache=cache) + to_datetime("2015-02-29", errors="raise", cache=cache) @pytest.mark.parametrize( "arg, format, msg", @@ -2929,72 +2904,71 @@ def test_day_not_in_month_raise_value(self, cache, arg, format, msg): to_datetime(arg, errors="raise", format=format, cache=cache) @pytest.mark.parametrize( - "expected, format, warning", + "expected, format", [ - ["2015-02-29", None, UserWarning], - ["2015-02-29", "%Y-%m-%d", None], - ["2015-02-29", "%Y-%m-%d", None], - ["2015-04-31", "%Y-%m-%d", None], + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-29", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], ], ) - def test_day_not_in_month_ignore(self, cache, expected, format, warning): - with tm.assert_produces_warning(warning, match="Could not infer format"): - result = to_datetime(expected, errors="ignore", format=format, cache=cache) + def test_day_not_in_month_ignore(self, cache, expected, format): + result = to_datetime(expected, errors="ignore", format=format, cache=cache) assert result 
== expected class TestDatetimeParsingWrappers: @pytest.mark.parametrize( - "date_str, expected, warning", + "date_str, expected", [ - ("2011-01-01", datetime(2011, 1, 1), None), - ("2Q2005", datetime(2005, 4, 1), UserWarning), - ("2Q05", datetime(2005, 4, 1), UserWarning), - ("2005Q1", datetime(2005, 1, 1), UserWarning), - ("05Q1", datetime(2005, 1, 1), UserWarning), - ("2011Q3", datetime(2011, 7, 1), UserWarning), - ("11Q3", datetime(2011, 7, 1), UserWarning), - ("3Q2011", datetime(2011, 7, 1), UserWarning), - ("3Q11", datetime(2011, 7, 1), UserWarning), + ("2011-01-01", datetime(2011, 1, 1)), + ("2Q2005", datetime(2005, 4, 1)), + ("2Q05", datetime(2005, 4, 1)), + ("2005Q1", datetime(2005, 1, 1)), + ("05Q1", datetime(2005, 1, 1)), + ("2011Q3", datetime(2011, 7, 1)), + ("11Q3", datetime(2011, 7, 1)), + ("3Q2011", datetime(2011, 7, 1)), + ("3Q11", datetime(2011, 7, 1)), # quarterly without space - ("2000Q4", datetime(2000, 10, 1), UserWarning), - ("00Q4", datetime(2000, 10, 1), UserWarning), - ("4Q2000", datetime(2000, 10, 1), UserWarning), - ("4Q00", datetime(2000, 10, 1), UserWarning), - ("2000q4", datetime(2000, 10, 1), UserWarning), - ("2000-Q4", datetime(2000, 10, 1), UserWarning), - ("00-Q4", datetime(2000, 10, 1), UserWarning), - ("4Q-2000", datetime(2000, 10, 1), UserWarning), - ("4Q-00", datetime(2000, 10, 1), UserWarning), - ("00q4", datetime(2000, 10, 1), UserWarning), - ("2005", datetime(2005, 1, 1), None), - ("2005-11", datetime(2005, 11, 1), None), - ("2005 11", datetime(2005, 11, 1), UserWarning), - ("11-2005", datetime(2005, 11, 1), UserWarning), - ("11 2005", datetime(2005, 11, 1), UserWarning), - ("200511", datetime(2020, 5, 11), UserWarning), - ("20051109", datetime(2005, 11, 9), None), - ("20051109 10:15", datetime(2005, 11, 9, 10, 15), None), - ("20051109 08H", datetime(2005, 11, 9, 8, 0), None), - ("2005-11-09 10:15", datetime(2005, 11, 9, 10, 15), None), - ("2005-11-09 08H", datetime(2005, 11, 9, 8, 0), None), - ("2005/11/09 10:15", 
datetime(2005, 11, 9, 10, 15), None), - ("2005/11/09 08H", datetime(2005, 11, 9, 8, 0), None), - ("Thu Sep 25 10:36:28 2003", datetime(2003, 9, 25, 10, 36, 28), None), - ("Thu Sep 25 2003", datetime(2003, 9, 25), None), - ("Sep 25 2003", datetime(2003, 9, 25), None), - ("January 1 2014", datetime(2014, 1, 1), None), + ("2000Q4", datetime(2000, 10, 1)), + ("00Q4", datetime(2000, 10, 1)), + ("4Q2000", datetime(2000, 10, 1)), + ("4Q00", datetime(2000, 10, 1)), + ("2000q4", datetime(2000, 10, 1)), + ("2000-Q4", datetime(2000, 10, 1)), + ("00-Q4", datetime(2000, 10, 1)), + ("4Q-2000", datetime(2000, 10, 1)), + ("4Q-00", datetime(2000, 10, 1)), + ("00q4", datetime(2000, 10, 1)), + ("2005", datetime(2005, 1, 1)), + ("2005-11", datetime(2005, 11, 1)), + ("2005 11", datetime(2005, 11, 1)), + ("11-2005", datetime(2005, 11, 1)), + ("11 2005", datetime(2005, 11, 1)), + ("200511", datetime(2020, 5, 11)), + ("20051109", datetime(2005, 11, 9)), + ("20051109 10:15", datetime(2005, 11, 9, 10, 15)), + ("20051109 08H", datetime(2005, 11, 9, 8, 0)), + ("2005-11-09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005-11-09 08H", datetime(2005, 11, 9, 8, 0)), + ("2005/11/09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005/11/09 08H", datetime(2005, 11, 9, 8, 0)), + ("Thu Sep 25 10:36:28 2003", datetime(2003, 9, 25, 10, 36, 28)), + ("Thu Sep 25 2003", datetime(2003, 9, 25)), + ("Sep 25 2003", datetime(2003, 9, 25)), + ("January 1 2014", datetime(2014, 1, 1)), # GHE10537 - ("2014-06", datetime(2014, 6, 1), None), - ("06-2014", datetime(2014, 6, 1), UserWarning), - ("2014-6", datetime(2014, 6, 1), None), - ("6-2014", datetime(2014, 6, 1), UserWarning), - ("20010101 12", datetime(2001, 1, 1, 12), None), - ("20010101 1234", datetime(2001, 1, 1, 12, 34), None), - ("20010101 123456", datetime(2001, 1, 1, 12, 34, 56), None), + ("2014-06", datetime(2014, 6, 1)), + ("06-2014", datetime(2014, 6, 1)), + ("2014-6", datetime(2014, 6, 1)), + ("6-2014", datetime(2014, 6, 1)), + ("20010101 12", 
datetime(2001, 1, 1, 12)), + ("20010101 1234", datetime(2001, 1, 1, 12, 34)), + ("20010101 123456", datetime(2001, 1, 1, 12, 34, 56)), ], ) - def test_parsers(self, date_str, expected, warning, cache): + def test_parsers(self, date_str, expected, cache): # dateutil >= 2.5.0 defaults to yearfirst=True # https://github.com/dateutil/dateutil/issues/217 yearfirst = True @@ -3002,13 +2976,12 @@ def test_parsers(self, date_str, expected, warning, cache): result1, _ = parsing.parse_datetime_string_with_reso( date_str, yearfirst=yearfirst ) - with tm.assert_produces_warning(warning, match="Could not infer format"): - result2 = to_datetime(date_str, yearfirst=yearfirst) - result3 = to_datetime([date_str], yearfirst=yearfirst) - # result5 is used below - result4 = to_datetime( - np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache - ) + result2 = to_datetime(date_str, yearfirst=yearfirst) + result3 = to_datetime([date_str], yearfirst=yearfirst) + # result5 is used below + result4 = to_datetime( + np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache + ) result6 = DatetimeIndex([date_str], yearfirst=yearfirst) # result7 is used below result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst) @@ -3117,10 +3090,9 @@ def test_parsers_dayfirst_yearfirst( result2 = Timestamp(date_str) assert result2 == expected - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result3 = to_datetime( - date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache - ) + result3 = to_datetime( + date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache + ) result4 = DatetimeIndex([date_str], dayfirst=dayfirst, yearfirst=yearfirst)[0] @@ -3137,9 +3109,8 @@ def test_parsers_timestring(self, date_str, exp_def): exp_now = parse(date_str) result1, _ = parsing.parse_datetime_string_with_reso(date_str) - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result2 = to_datetime(date_str) - result3 = 
to_datetime([date_str]) + result2 = to_datetime(date_str) + result3 = to_datetime([date_str]) result4 = Timestamp(date_str) result5 = DatetimeIndex([date_str])[0] # parse time string return time string based on default date @@ -3316,10 +3287,7 @@ def test_incorrect_value_exception(self): "Unknown datetime string format, unable to parse: yesterday, at position 1" ) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(["today", "yesterday"]) + to_datetime(["today", "yesterday"]) @pytest.mark.parametrize( "format, warning", @@ -3333,8 +3301,7 @@ def test_to_datetime_out_of_bounds_with_format_arg(self, format, warning): # see gh-23830 msg = r"^Out of bounds nanosecond timestamp: 2417-10-10 00:00:00, at position 0" with pytest.raises(OutOfBoundsDatetime, match=msg): - with tm.assert_produces_warning(warning, match="Could not infer format"): - to_datetime("2417-10-10 00:00:00", format=format) + to_datetime("2417-10-10 00:00:00", format=format) @pytest.mark.parametrize( "arg, origin, expected_str",
Backport PR #52195: WARN: Only warn about inconsistent parsing if there are multiple non-null elements
https://api.github.com/repos/pandas-dev/pandas/pulls/52242
2023-03-27T17:23:55Z
2023-03-27T19:58:47Z
2023-03-27T19:58:47Z
2023-03-27T19:58:48Z
Backport PR #51538 on branch 2.0.x (BUG: Timedelta comparisons with very large pytimedeltas overflowing)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 2263c8789f979..63961afaf02c4 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -1189,6 +1189,7 @@ Timedelta - Bug in :func:`to_timedelta` raising error when input has nullable dtype ``Float64`` (:issue:`48796`) - Bug in :class:`Timedelta` constructor incorrectly raising instead of returning ``NaT`` when given a ``np.timedelta64("nat")`` (:issue:`48898`) - Bug in :class:`Timedelta` constructor failing to raise when passed both a :class:`Timedelta` object and keywords (e.g. days, seconds) (:issue:`48898`) +- Bug in :class:`Timedelta` comparisons with very large ``datetime.timedelta`` objects incorrect raising ``OutOfBoundsTimedelta`` (:issue:`49021`) Timezones ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 6c0c02c66be6f..955e1cf95e04c 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -4,6 +4,10 @@ import warnings cimport cython from cpython.object cimport ( Py_EQ, + Py_GE, + Py_GT, + Py_LE, + Py_LT, Py_NE, PyObject, PyObject_RichCompare, @@ -1150,8 +1154,27 @@ cdef class _Timedelta(timedelta): if isinstance(other, _Timedelta): ots = other elif is_any_td_scalar(other): - ots = Timedelta(other) - # TODO: watch out for overflows + try: + ots = Timedelta(other) + except OutOfBoundsTimedelta as err: + # GH#49021 pytimedelta.max overflows + if not PyDelta_Check(other): + # TODO: handle this case + raise + ltup = (self.days, self.seconds, self.microseconds, self.nanoseconds) + rtup = (other.days, other.seconds, other.microseconds, 0) + if op == Py_EQ: + return ltup == rtup + elif op == Py_NE: + return ltup != rtup + elif op == Py_LT: + return ltup < rtup + elif op == Py_LE: + return ltup <= rtup + elif op == Py_GT: + return ltup > rtup + elif op == Py_GE: + return ltup >= rtup elif other is NaT: return op == Py_NE diff --git 
a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index d67d451e4fc6d..e583de1f489db 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -966,6 +966,70 @@ def test_td_op_timedelta_timedeltalike_array(self, op, arr): class TestTimedeltaComparison: + def test_compare_pytimedelta_bounds(self): + # GH#49021 don't overflow on comparison with very large pytimedeltas + + for unit in ["ns", "us"]: + tdmax = Timedelta.max.as_unit(unit).max + tdmin = Timedelta.min.as_unit(unit).min + + assert tdmax < timedelta.max + assert tdmax <= timedelta.max + assert not tdmax > timedelta.max + assert not tdmax >= timedelta.max + assert tdmax != timedelta.max + assert not tdmax == timedelta.max + + assert tdmin > timedelta.min + assert tdmin >= timedelta.min + assert not tdmin < timedelta.min + assert not tdmin <= timedelta.min + assert tdmin != timedelta.min + assert not tdmin == timedelta.min + + # But the "ms" and "s"-reso bounds extend pass pytimedelta + for unit in ["ms", "s"]: + tdmax = Timedelta.max.as_unit(unit).max + tdmin = Timedelta.min.as_unit(unit).min + + assert tdmax > timedelta.max + assert tdmax >= timedelta.max + assert not tdmax < timedelta.max + assert not tdmax <= timedelta.max + assert tdmax != timedelta.max + assert not tdmax == timedelta.max + + assert tdmin < timedelta.min + assert tdmin <= timedelta.min + assert not tdmin > timedelta.min + assert not tdmin >= timedelta.min + assert tdmin != timedelta.min + assert not tdmin == timedelta.min + + def test_compare_pytimedelta_bounds2(self): + # a pytimedelta outside the microsecond bounds + pytd = timedelta(days=999999999, seconds=86399) + # NB: np.timedelta64(td, "s"") incorrectly overflows + td64 = np.timedelta64(pytd.days, "D") + np.timedelta64(pytd.seconds, "s") + td = Timedelta(td64) + assert td.days == pytd.days + assert td.seconds == pytd.seconds + + assert td == pytd + assert not td != pytd 
+ assert not td < pytd + assert not td > pytd + assert td <= pytd + assert td >= pytd + + td2 = td - Timedelta(seconds=1).as_unit("s") + assert td2 != pytd + assert not td2 == pytd + assert td2 < pytd + assert td2 <= pytd + assert not td2 > pytd + assert not td2 >= pytd + def test_compare_tick(self, tick_classes): cls = tick_classes
Backport PR #51538: BUG: Timedelta comparisons with very large pytimedeltas overflowing
https://api.github.com/repos/pandas-dev/pandas/pulls/52241
2023-03-27T17:17:53Z
2023-03-27T20:32:12Z
2023-03-27T20:32:12Z
2023-03-27T20:32:13Z
CI: Try running single_cpu/not single_cpu tests together
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index f990c6edaeb2a..08dd09e57871b 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -26,7 +26,8 @@ jobs: strategy: matrix: env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml, actions-311.yaml] - pattern: ["not single_cpu", "single_cpu"] + # Prevent the include jobs from overriding other jobs + pattern: [""] pyarrow_version: ["8", "9", "10"] include: - name: "Downstream Compat" @@ -100,7 +101,7 @@ jobs: PANDAS_COPY_ON_WRITE: ${{ matrix.pandas_copy_on_write || '0' }} PANDAS_CI: ${{ matrix.pandas_ci || '1' }} TEST_ARGS: ${{ matrix.test_args || '' }} - PYTEST_WORKERS: ${{ contains(matrix.pattern, 'not single_cpu') && 'auto' || '1' }} + PYTEST_WORKERS: 'auto' PYTEST_TARGET: ${{ matrix.pytest_target || 'pandas' }} IS_PYPY: ${{ contains(matrix.env_file, 'pypy') }} # TODO: re-enable coverage on pypy, its slow @@ -169,9 +170,22 @@ jobs: pyarrow-version: ${{ matrix.pyarrow_version }} - name: Build Pandas + id: build uses: ./.github/actions/build_pandas - - name: Test + - name: Test (not single_cpu) uses: ./.github/actions/run-tests # TODO: Don't continue on error for PyPy continue-on-error: ${{ env.IS_PYPY == 'true' }} + env: + # Set pattern to not single_cpu if not already set + PATTERN: ${{ env.PATTERN == '' && 'not single_cpu' || matrix.pattern }} + + - name: Test (single_cpu) + uses: ./.github/actions/run-tests + # TODO: Don't continue on error for PyPy + continue-on-error: ${{ env.IS_PYPY == 'true' }} + env: + PATTERN: 'single_cpu' + PYTEST_WORKERS: 1 + if: ${{ matrix.pattern == '' && (always() && steps.build.outcome == 'success')}}
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. If I did this right, should get us from 20 jobs to around 14 jobs. This'll probably slow down the current jobs by ~10 minutes (but they already take 1 hr), but we'll save at least the 5 minutes of compiling pandas on the single_cpu jobs + whatever it takes to setup the conda envs there. Hopefully the shorter queue times during peak periods makes up for the extra time.
https://api.github.com/repos/pandas-dev/pandas/pulls/52239
2023-03-27T15:36:15Z
2023-03-30T00:15:49Z
2023-03-30T00:15:49Z
2023-03-30T01:14:42Z
COMPAT: Remove unnecessary memoryview workaround
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index 164ed8a5c9227..5929647468785 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -850,17 +850,7 @@ def asof_join_nearest_on_X_by_Y(ndarray[numeric_t] left_values, numeric_t bdiff, fdiff # search both forward and backward - # TODO(cython3): - # Bug in beta1 preventing Cython from choosing - # right specialization when one fused memview is None - # Doesn't matter what type we choose - # (nothing happens anyways since it is None) - # GH 51640 - if left_by_values is not None and left_by_values.dtype != object: - by_dtype = f"{left_by_values.dtype}_t" - else: - by_dtype = object - bli, bri = asof_join_backward_on_X_by_Y[f"{left_values.dtype}_t", by_dtype]( + bli, bri = asof_join_backward_on_X_by_Y( left_values, right_values, left_by_values, @@ -869,7 +859,7 @@ def asof_join_nearest_on_X_by_Y(ndarray[numeric_t] left_values, tolerance, use_hashtable ) - fli, fri = asof_join_forward_on_X_by_Y[f"{left_values.dtype}_t", by_dtype]( + fli, fri = asof_join_forward_on_X_by_Y( left_values, right_values, left_by_values, diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index a904f4d9fbe13..e8fd3398c4db8 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2207,13 +2207,7 @@ def injection(obj: ArrayLike): else: # choose appropriate function by type func = _asof_by_function(self.direction) - # TODO(cython3): - # Bug in beta1 preventing Cython from choosing - # right specialization when one fused memview is None - # Doesn't matter what type we choose - # (nothing happens anyways since it is None) - # GH 51640 - return func[f"{left_values.dtype}_t", object]( + return func( left_values, right_values, None,
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52237
2023-03-27T11:40:40Z
2023-07-18T19:18:10Z
2023-07-18T19:18:10Z
2023-07-18T19:53:21Z
BUG: Removed bug from groupby/min on categoricals
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 6f361ff867c35..b72a21f1aa0c6 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -402,6 +402,8 @@ def _ea_wrap_cython_operation( if self.how in self.cast_blocklist: return res_values + elif self.how in ["first", "last", "min", "max"]: + res_values[result_mask == 1] = -1 return values._from_backing_data(res_values) npvalues = self._ea_to_cython_values(values) diff --git a/pandas/tests/groupby/test_min_max.py b/pandas/tests/groupby/test_min_max.py index 8602f8bdb1aa1..37eb52be0b37b 100644 --- a/pandas/tests/groupby/test_min_max.py +++ b/pandas/tests/groupby/test_min_max.py @@ -247,3 +247,26 @@ def test_min_max_nullable_uint64_empty_group(): res = gb.max() expected.iloc[0, 0] = 9 tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize("func", ["first", "last", "min", "max"]) +def test_groupby_min_max_categorical(func): + # GH: 52151 + df = DataFrame( + { + "col1": pd.Categorical(["A"], categories=list("AB"), ordered=True), + "col2": pd.Categorical([1], categories=[1, 2], ordered=True), + "value": 0.1, + } + ) + result = getattr(df.groupby("col1", observed=False), func)() + + idx = pd.CategoricalIndex(data=["A", "B"], name="col1", ordered=True) + expected = DataFrame( + { + "col2": pd.Categorical([1, None], categories=[1, 2], ordered=True), + "value": [0.1, None], + }, + index=idx, + ) + tm.assert_frame_equal(result, expected)
- [ x] closes #52151 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52236
2023-03-27T10:51:42Z
2023-03-30T16:06:47Z
2023-03-30T16:06:47Z
2023-03-30T16:11:01Z
read_fwf with urlopen test GH#26376
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py index 37a75d9f59920..d79968a580e40 100644 --- a/pandas/_testing/_io.py +++ b/pandas/_testing/_io.py @@ -271,7 +271,10 @@ def can_connect(url, error_classes=None) -> bool: try: with urlopen(url, timeout=20) as response: # Timeout just in case rate-limiting is applied - if response.status != 200: + if ( + response.info().get("Content-type") == "text/html" + and response.status != 200 + ): return False except error_classes: return False diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index d166946704e13..2a05a3aa3297e 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -28,6 +28,7 @@ ) from pandas.tests.io.test_compression import _compression_to_extension +from pandas.io.common import urlopen from pandas.io.parsers import ( read_csv, read_fwf, @@ -1010,3 +1011,50 @@ def test_invalid_dtype_backend(): ) with pytest.raises(ValueError, match=msg): read_fwf("test", dtype_backend="numpy") + + +@pytest.mark.network +@tm.network( + url="ftp://ftp.ncdc.noaa.gov/pub/data/igra/igra2-station-list.txt", + check_before_test=True, +) +def test_url_urlopen(): + expected = pd.Index( + [ + "CC", + "Network", + "Code", + "StationId", + "Latitude", + "Longitude", + "Elev", + "dummy", + "StationName", + "From", + "To", + "Nrec", + ], + dtype="object", + ) + url = "ftp://ftp.ncdc.noaa.gov/pub/data/igra/igra2-station-list.txt" + with urlopen(url) as f: + result = read_fwf( + f, + widths=(2, 1, 3, 5, 9, 10, 7, 4, 30, 5, 5, 7), + names=( + "CC", + "Network", + "Code", + "StationId", + "Latitude", + "Longitude", + "Elev", + "dummy", + "StationName", + "From", + "To", + "Nrec", + ), + ).columns + + tm.assert_index_equal(result, expected)
- [ ] closes #26376 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). To avoid download file, I just compare the columns.
https://api.github.com/repos/pandas-dev/pandas/pulls/52233
2023-03-27T05:21:17Z
2023-03-29T21:35:18Z
2023-03-29T21:35:18Z
2023-03-29T23:30:30Z
CI: update autotyping
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d4baa638bdda2..de36bf2d441c5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -413,8 +413,8 @@ repos: language: python stages: [manual] additional_dependencies: - - autotyping==22.9.0 - - libcst==0.4.7 + - autotyping==23.3.0 + - libcst==0.4.9 - id: check-test-naming name: check that test names start with 'test' entry: python -m scripts.check_test_naming diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 56d505d024949..e14f51df24a8a 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -737,7 +737,7 @@ def pp(name: str, ks: Iterable[str]) -> list[str]: @contextmanager -def config_prefix(prefix) -> Generator[None, None, None]: +def config_prefix(prefix: str) -> Generator[None, None, None]: """ contextmanager for multiple invocations of API with a common prefix diff --git a/pandas/_testing/_random.py b/pandas/_testing/_random.py index f0a0437a5bfc6..af8c7b4870f4c 100644 --- a/pandas/_testing/_random.py +++ b/pandas/_testing/_random.py @@ -10,7 +10,9 @@ RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1)) -def rands_array(nchars, size, dtype: NpDtype = "O", replace: bool = True) -> np.ndarray: +def rands_array( + nchars, size: int, dtype: NpDtype = "O", replace: bool = True +) -> np.ndarray: """ Generate an array of byte strings. """ diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py index db4ddd45db955..fb5b7b967f6bf 100644 --- a/pandas/_testing/contexts.py +++ b/pandas/_testing/contexts.py @@ -154,7 +154,7 @@ def ensure_safe_environment_variables() -> Generator[None, None, None]: @contextmanager -def with_csv_dialect(name, **kwargs) -> Generator[None, None, None]: +def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]: """ Context manager to temporarily register a CSV dialect for parsing CSV. 
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 8b2916bf1ded9..f6e80aba0c34f 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -342,7 +342,7 @@ def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> ) -def validate_groupby_func(name, args, kwargs, allowed=None) -> None: +def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None: """ 'args' and 'kwargs' should be empty, except for allowed kwargs because all of their necessary parameters are explicitly listed in the function diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 58da2cd994777..1b36659944561 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -51,13 +51,13 @@ class PandasDelegate: Abstract base class for delegating methods/properties. """ - def _delegate_property_get(self, name, *args, **kwargs): + def _delegate_property_get(self, name: str, *args, **kwargs): raise TypeError(f"You cannot access the property {name}") - def _delegate_property_set(self, name, value, *args, **kwargs): + def _delegate_property_set(self, name: str, value, *args, **kwargs): raise TypeError(f"The property {name} cannot be set") - def _delegate_method(self, name, *args, **kwargs): + def _delegate_method(self, name: str, *args, **kwargs): raise TypeError(f"You cannot call method {name}") @classmethod @@ -91,7 +91,7 @@ def _add_delegate_accessors( False skips the missing accessor. 
""" - def _create_delegator_property(name): + def _create_delegator_property(name: str): def _getter(self): return self._delegate_property_get(name) @@ -107,7 +107,7 @@ def _setter(self, new_values): doc=getattr(delegate, accessor_mapping(name)).__doc__, ) - def _create_delegator_method(name): + def _create_delegator_method(name: str): def f(self, *args, **kwargs): return self._delegate_method(name, *args, **kwargs) @@ -231,7 +231,7 @@ def __get__(self, obj, cls): @doc(klass="", others="") -def _register_accessor(name, cls): +def _register_accessor(name: str, cls): """ Register a custom accessor on {klass} objects. @@ -320,21 +320,21 @@ def decorator(accessor): @doc(_register_accessor, klass="DataFrame") -def register_dataframe_accessor(name): +def register_dataframe_accessor(name: str): from pandas import DataFrame return _register_accessor(name, DataFrame) @doc(_register_accessor, klass="Series") -def register_series_accessor(name): +def register_series_accessor(name: str): from pandas import Series return _register_accessor(name, Series) @doc(_register_accessor, klass="Index") -def register_index_accessor(name): +def register_index_accessor(name: str): from pandas import Index return _register_accessor(name, Index) diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index c0cca1852b446..acf8cbc8fd545 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -291,14 +291,14 @@ def __getitem__( return result def _fill_mask_inplace( - self, method: str, limit, mask: npt.NDArray[np.bool_] + self, method: str, limit: int | None, mask: npt.NDArray[np.bool_] ) -> None: # (for now) when self.ndim == 2, we assume axis=0 func = missing.get_fill_func(method, ndim=self.ndim) func(self._ndarray.T, limit=limit, mask=mask.T) @doc(ExtensionArray.fillna) - def fillna(self, value=None, method=None, limit=None) -> Self: + def fillna(self, value=None, method=None, limit: int | None = None) -> Self: value, method = 
validate_fillna_kwargs( value, method, validate_scalar_dict_value=False ) diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 6b722d800519c..fc303536b337b 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1952,7 +1952,7 @@ def _str_translate(self, table): "str.translate not supported with pd.ArrowDtype(pa.string())." ) - def _str_wrap(self, width, **kwargs): + def _str_wrap(self, width: int, **kwargs): raise NotImplementedError( "str.wrap not supported with pd.ArrowDtype(pa.string())." ) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 20489654d7700..a5032c590300c 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1570,7 +1570,7 @@ def _where(self, mask: npt.NDArray[np.bool_], value) -> Self: return result def _fill_mask_inplace( - self, method: str, limit, mask: npt.NDArray[np.bool_] + self, method: str, limit: int | None, mask: npt.NDArray[np.bool_] ) -> None: """ Replace values in locations specified by 'mask' using pad or backfill. 
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 0a79004871f5f..f8befdbc6ca9c 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2500,10 +2500,14 @@ def _validate(data): if not is_categorical_dtype(data.dtype): raise AttributeError("Can only use .cat accessor with a 'category' dtype") - def _delegate_property_get(self, name): + # error: Signature of "_delegate_property_get" incompatible with supertype + # "PandasDelegate" + def _delegate_property_get(self, name: str): # type: ignore[override] return getattr(self._parent, name) - def _delegate_property_set(self, name, new_values): + # error: Signature of "_delegate_property_set" incompatible with supertype + # "PandasDelegate" + def _delegate_property_set(self, name: str, new_values): # type: ignore[override] return setattr(self._parent, name, new_values) @property @@ -2515,7 +2519,7 @@ def codes(self) -> Series: return Series(self._parent.codes, index=self._index) - def _delegate_method(self, name, *args, **kwargs): + def _delegate_method(self, name: str, *args, **kwargs): from pandas import Series method = getattr(self._parent, name) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 296e8e0784e38..deccfaf12c036 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -113,7 +113,7 @@ def tz_to_dtype(tz: tzinfo | None, unit: str = "ns"): return DatetimeTZDtype(tz=tz, unit=unit) -def _field_accessor(name: str, field: str, docstring=None): +def _field_accessor(name: str, field: str, docstring: str | None = None): def f(self): values = self._local_timestamps() diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index abc5606798cd9..3e32598cc6b11 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -889,7 +889,7 @@ def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOr indexer = obj.argsort()[-1] 
return obj[indexer] - def fillna(self, value=None, method=None, limit=None) -> Self: + def fillna(self, value=None, method=None, limit: int | None = None) -> Self: """ Fill NA/NaN values using the specified method. diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 8591cf2d3a4c5..aa3516c3ecb4f 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -162,7 +162,7 @@ def __getitem__(self, item: PositionalIndexer) -> Self | Any: @doc(ExtensionArray.fillna) - def fillna(self, value=None, method=None, limit=None) -> Self: + def fillna(self, value=None, method=None, limit: int | None = None) -> Self: value, method = validate_fillna_kwargs(value, method) mask = self._mask diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 64ce896077fc1..6557a4b674b4f 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -98,7 +98,7 @@ } -def _field_accessor(name: str, docstring=None): +def _field_accessor(name: str, docstring: str | None = None): def f(self): base = self.freq._period_dtype_code result = get_period_field_arr(name, self.asi8, base) @@ -658,7 +658,7 @@ def searchsorted( m8arr = self._ndarray.view("M8[ns]") return m8arr.searchsorted(npvalue, side=side, sorter=sorter) - def fillna(self, value=None, method=None, limit=None) -> PeriodArray: + def fillna(self, value=None, method=None, limit: int | None = None) -> PeriodArray: if method is not None: # view as dt64 so we get treated as timelike in core.missing, # similar to dtl._period_dispatch diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py index ca1e73d3e6865..24ae13c12ad32 100644 --- a/pandas/core/arrays/sparse/accessor.py +++ b/pandas/core/arrays/sparse/accessor.py @@ -46,10 +46,10 @@ def _validate(self, data): if not isinstance(data.dtype, SparseDtype): raise AttributeError(self._validation_msg) - def _delegate_property_get(self, name, *args, **kwargs):
+ def _delegate_property_get(self, name: str, *args, **kwargs): return getattr(self._parent.array, name) - def _delegate_method(self, name, *args, **kwargs): + def _delegate_method(self, name: str, *args, **kwargs): if name == "from_coo": return self.from_coo(*args, **kwargs) elif name == "to_coo": diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bef7022a7d10f..0e5c86dcaf2e0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4936,7 +4936,9 @@ def _series(self): # ---------------------------------------------------------------------- # Reindexing and alignment - def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): + def _reindex_axes( + self, axes, level, limit: int | None, tolerance, method, fill_value, copy + ): frame = self columns = axes["columns"] @@ -4960,7 +4962,7 @@ def _reindex_index( copy: bool, level: Level, fill_value=np.nan, - limit=None, + limit: int | None = None, tolerance=None, ): new_index, indexer = self.index.reindex( @@ -4980,7 +4982,7 @@ def _reindex_columns( copy: bool, level: Level, fill_value=None, - limit=None, + limit: int | None = None, tolerance=None, ): new_columns, indexer = self.columns.reindex( diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 80ac49d460d3d..40e25e550a8c5 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -255,7 +255,7 @@ class NDFrame(PandasObject, indexing.IndexingMixin): _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] - _is_copy: weakref.ReferenceType[NDFrame] | None = None + _is_copy: weakref.ReferenceType[NDFrame] | str | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str @@ -4306,7 +4306,7 @@ def __delitem__(self, key) -> None: # Unsorted @final - def _check_inplace_and_allows_duplicate_labels(self, inplace): + def _check_inplace_and_allows_duplicate_labels(self, inplace: bool_t): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( 
"Cannot specify 'inplace=True' when " @@ -4384,7 +4384,7 @@ def reindex_like( other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, - limit=None, + limit: int | None = None, tolerance=None, ) -> Self: """ @@ -9628,7 +9628,7 @@ def _align_frame( copy: bool_t | None = None, fill_value=None, method=None, - limit=None, + limit: int | None = None, fill_axis: Axis = 0, ) -> tuple[Self, DataFrame, Index | None]: # defaults @@ -9684,7 +9684,7 @@ def _align_series( copy: bool_t | None = None, fill_value=None, method=None, - limit=None, + limit: int | None = None, fill_axis: Axis = 0, ) -> tuple[Self, Series, Index | None]: is_series = isinstance(self, ABCSeries) @@ -10983,7 +10983,7 @@ def pct_change( self, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", - limit=None, + limit: int | None = None, freq=None, **kwargs, ) -> Self: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index a9df4237601db..e87a74e5885b3 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -2249,7 +2249,7 @@ def fillna( method: FillnaOptions | None = None, axis: Axis | None | lib.NoDefault = lib.no_default, inplace: bool = False, - limit=None, + limit: int | None = None, downcast=None, ) -> DataFrame | None: """ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index e84a23be6c5bb..e591298e2a58e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2781,7 +2781,7 @@ def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby: ) @final - def _fill(self, direction: Literal["ffill", "bfill"], limit=None): + def _fill(self, direction: Literal["ffill", "bfill"], limit: int | None = None): """ Shared function for `pad` and `backfill` to call Cython method. 
@@ -2868,7 +2868,7 @@ def blk_func(values: ArrayLike) -> ArrayLike: @final @Substitution(name="groupby") - def ffill(self, limit=None): + def ffill(self, limit: int | None = None): """ Forward fill the values. @@ -2893,7 +2893,7 @@ def ffill(self, limit=None): @final @Substitution(name="groupby") - def bfill(self, limit=None): + def bfill(self, limit: int | None = None): """ Backward fill the values. @@ -3789,7 +3789,7 @@ def pct_change( self, periods: int = 1, fill_method: FillnaOptions = "ffill", - limit=None, + limit: int | None = None, freq=None, axis: Axis | lib.NoDefault = lib.no_default, ): diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 85460a04298e6..9f5dd5bec41ef 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -81,7 +81,9 @@ def _get_values(self): f"cannot convert an object of type {type(data)} to a datetimelike index" ) - def _delegate_property_get(self, name): + # error: Signature of "_delegate_property_get" incompatible with supertype + # "PandasDelegate" + def _delegate_property_get(self, name: str): # type: ignore[override] from pandas import Series values = self._get_values() @@ -113,13 +115,13 @@ def _delegate_property_get(self, name): return result - def _delegate_property_set(self, name, value, *args, **kwargs): + def _delegate_property_set(self, name: str, value, *args, **kwargs): raise ValueError( "modifications to a property of a datetimelike object are not supported. " "Change values on the original." 
) - def _delegate_method(self, name, *args, **kwargs): + def _delegate_method(self, name: str, *args, **kwargs): from pandas import Series values = self._get_values() diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c93eb0fe3def6..cd7469ca3234b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4188,7 +4188,12 @@ def _validate_can_reindex(self, indexer: np.ndarray) -> None: raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( - self, target, method=None, level=None, limit=None, tolerance=None + self, + target, + method=None, + level=None, + limit: int | None = None, + tolerance: float | None = None, ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 75ce22bd91f41..b740f58097509 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -342,7 +342,7 @@ def __contains__(self, key: Any) -> bool: return contains(self, key, container=self._engine) def reindex( - self, target, method=None, level=None, limit=None, tolerance=None + self, target, method=None, level=None, limit: int | None = None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values (move/add/delete values as necessary) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 3b4a6b2e5dfde..eae70d50e7f95 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -485,7 +485,7 @@ def shift(self, periods: int = 1, freq=None): def period_range( - start=None, end=None, periods: int | None = None, freq=None, name=None + start=None, end=None, periods: int | None = None, freq=None, name: Hashable = None ) -> PeriodIndex: """ Return a fixed frequency PeriodIndex. 
diff --git a/pandas/core/interchange/dataframe.py b/pandas/core/interchange/dataframe.py index 0de9b130f0aab..51b6cebabc2d5 100644 --- a/pandas/core/interchange/dataframe.py +++ b/pandas/core/interchange/dataframe.py @@ -92,7 +92,7 @@ def select_columns_by_name(self, names) -> PandasDataFrameXchg: self._df.loc[:, names], self._nan_as_null, self._allow_copy ) - def get_chunks(self, n_chunks=None): + def get_chunks(self, n_chunks: int | None = None): """ Return an iterator yielding the chunks. """ diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 0408dfd83fedc..407e16e1fa187 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -363,7 +363,7 @@ def shift(self, periods: int, axis: AxisInt, fill_value) -> Self: "shift", periods=periods, axis=axis, fill_value=fill_value ) - def fillna(self, value, limit, inplace: bool, downcast) -> Self: + def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self: if limit is not None: # Do this validation even if we go through one of the no-op paths limit = libalgos.validate_limit(None, limit=limit) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 70d7920ac5bb2..cb644c8329179 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -425,7 +425,7 @@ def shift(self, periods: int, axis: AxisInt, fill_value) -> Self: return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value) - def fillna(self, value, limit, inplace: bool, downcast) -> Self: + def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self: if limit is not None: # Do this validation even if we go through one of the no-op paths limit = libalgos.validate_limit(None, limit=limit) diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 2fb323002292a..521ced16e9e97 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -873,7 +873,7 @@ def 
_datetimelike_compat(func: F) -> F: """ @wraps(func) - def new_func(values, limit=None, mask=None): + def new_func(values, limit: int | None = None, mask=None): if needs_i8_conversion(values.dtype): if mask is None: # This needs to occur before casting to int64 @@ -910,7 +910,11 @@ def _backfill_1d( @_datetimelike_compat -def _pad_2d(values: np.ndarray, limit=None, mask: npt.NDArray[np.bool_] | None = None): +def _pad_2d( + values: np.ndarray, + limit: int | None = None, + mask: npt.NDArray[np.bool_] | None = None, +): mask = _fillna_prep(values, mask) if np.all(values.shape): @@ -922,7 +926,9 @@ def _pad_2d(values: np.ndarray, limit=None, mask: npt.NDArray[np.bool_] | None = @_datetimelike_compat -def _backfill_2d(values, limit=None, mask: npt.NDArray[np.bool_] | None = None): +def _backfill_2d( + values, limit: int | None = None, mask: npt.NDArray[np.bool_] | None = None +): mask = _fillna_prep(values, mask) if np.all(values.shape): @@ -982,7 +988,7 @@ def _interp_limit(invalid, fw_limit, bw_limit): f_idx = set() b_idx = set() - def inner(invalid, limit): + def inner(invalid, limit: int): limit = min(limit, N) windowed = _rolling_window(invalid, limit + 1).all(1) idx = set(np.where(windowed)[0] + limit) | set( diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 3b31932952867..e8864deaaca4d 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -376,7 +376,7 @@ def transform(self, arg, *args, **kwargs): def _downsample(self, f, **kwargs): raise AbstractMethodError(self) - def _upsample(self, f, limit=None, fill_value=None): + def _upsample(self, f, limit: int | None = None, fill_value=None): raise AbstractMethodError(self) def _gotitem(self, key, ndim: int, subset=None): @@ -483,7 +483,7 @@ def _wrap_result(self, result): return result - def ffill(self, limit=None): + def ffill(self, limit: int | None = None): """ Forward fill the values. 
@@ -503,7 +503,7 @@ def ffill(self, limit=None): """ return self._upsample("ffill", limit=limit) - def nearest(self, limit=None): + def nearest(self, limit: int | None = None): """ Resample by using the nearest value. @@ -563,7 +563,7 @@ def nearest(self, limit=None): """ return self._upsample("nearest", limit=limit) - def bfill(self, limit=None): + def bfill(self, limit: int | None = None): """ Backward fill the new missing values in the resampled data. @@ -665,7 +665,7 @@ def bfill(self, limit=None): """ return self._upsample("bfill", limit=limit) - def fillna(self, method, limit=None): + def fillna(self, method, limit: int | None = None): """ Fill missing values introduced by upsampling. @@ -831,7 +831,7 @@ def interpolate( method: QuantileInterpolation = "linear", *, axis: Axis = 0, - limit=None, + limit: int | None = None, inplace: bool = False, limit_direction: Literal["forward", "backward", "both"] = "forward", limit_area=None, @@ -1311,7 +1311,7 @@ def _adjust_binner_for_upsample(self, binner): binner = binner[:-1] return binner - def _upsample(self, method, limit=None, fill_value=None): + def _upsample(self, method, limit: int | None = None, fill_value=None): """ Parameters ---------- @@ -1440,7 +1440,7 @@ def _downsample(self, how, **kwargs): "as they are not sub or super periods" ) - def _upsample(self, method, limit=None, fill_value=None): + def _upsample(self, method, limit: int | None = None, fill_value=None): """ Parameters ---------- @@ -1532,7 +1532,7 @@ def get_resampler_for_grouping( rule, how=None, fill_method=None, - limit=None, + limit: int | None = None, kind=None, on=None, **kwargs, @@ -1579,7 +1579,7 @@ def __init__( how: str = "mean", axis: Axis = 0, fill_method=None, - limit=None, + limit: int | None = None, kind: str | None = None, convention: Literal["start", "end", "e", "s"] | None = None, origin: Literal["epoch", "start", "start_day", "end", "end_day"] diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 
395db8060ce0e..d3806c6850b7a 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -71,7 +71,7 @@ def concat( ignore_index: bool = ..., keys=..., levels=..., - names=..., + names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., @@ -88,7 +88,7 @@ def concat( ignore_index: bool = ..., keys=..., levels=..., - names=..., + names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., @@ -105,7 +105,7 @@ def concat( ignore_index: bool = ..., keys=..., levels=..., - names=..., + names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., @@ -122,7 +122,7 @@ def concat( ignore_index: bool = ..., keys=..., levels=..., - names=..., + names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., @@ -139,7 +139,7 @@ def concat( ignore_index: bool = ..., keys=..., levels=..., - names=..., + names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., @@ -155,7 +155,7 @@ def concat( ignore_index: bool = False, keys=None, levels=None, - names=None, + names: list[HashableT] | None = None, verify_integrity: bool = False, sort: bool = False, copy: bool | None = None, @@ -400,7 +400,7 @@ def __init__( join: str = "outer", keys=None, levels=None, - names=None, + names: list[HashableT] | None = None, ignore_index: bool = False, verify_integrity: bool = False, copy: bool = True, diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py index 92d556a582262..320f441972bd8 100644 --- a/pandas/core/reshape/encoding.py +++ b/pandas/core/reshape/encoding.py @@ -161,7 +161,7 @@ def get_dummies( data_to_encode = data[columns] # validate prefixes and separator to avoid silently dropping cols - def check_len(item, name): + def check_len(item, name: str): if is_list_like(item): if not len(item) == 
data_to_encode.shape[1]: len_msg = ( diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index d2b022214167f..c3dacc2172aa7 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -364,7 +364,7 @@ def merge_asof( left_by=None, right_by=None, suffixes: Suffixes = ("_x", "_y"), - tolerance=None, + tolerance: int | Timedelta | None = None, allow_exact_matches: bool = True, direction: str = "backward", ) -> DataFrame: @@ -2554,7 +2554,7 @@ def _items_overlap_with_suffix( if not lsuffix and not rsuffix: raise ValueError(f"columns overlap but no suffix specified: {to_rename}") - def renamer(x, suffix): + def renamer(x, suffix: str | None): """ Rename the left and right indices. diff --git a/pandas/core/series.py b/pandas/core/series.py index 06c744c3e36fa..d6bd35639253a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1600,7 +1600,7 @@ def to_string( float_format: str | None = ..., header: bool = ..., index: bool = ..., - length=..., + length: bool = ..., dtype=..., name=..., max_rows: int | None = ..., @@ -1616,7 +1616,7 @@ def to_string( float_format: str | None = ..., header: bool = ..., index: bool = ..., - length=..., + length: bool = ..., dtype=..., name=..., max_rows: int | None = ..., diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index d6adf01f4e12b..a9ce262c356db 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -153,7 +153,7 @@ def _int64_cut_off(shape) -> int: return i return len(shape) - def maybe_lift(lab, size) -> tuple[np.ndarray, int]: + def maybe_lift(lab, size: int) -> tuple[np.ndarray, int]: # promote nan values (assigned -1 label in lab array) # so that all output values are non-negative return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 1c4727fda4e64..85c5b089b3582 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -133,7 
+133,7 @@ def wrapper(self, *args, **kwargs): return _forbid_nonstring_types -def _map_and_wrap(name, docstring): +def _map_and_wrap(name: str | None, docstring: str | None): @forbid_nonstring_types(["bytes"], name=name) def wrapper(self): result = getattr(self._data.array, f"_str_{name}")() @@ -413,7 +413,7 @@ def _get_series_list(self, others): def cat( self, others=None, - sep=None, + sep: str | None = None, na_rep=None, join: AlignJoin = "left", ) -> str | Series | Index: @@ -1043,7 +1043,7 @@ def get(self, i): return self._wrap_result(result) @forbid_nonstring_types(["bytes"]) - def join(self, sep): + def join(self, sep: str): """ Join lists contained as elements in the Series/Index with passed delimiter. @@ -1511,7 +1511,7 @@ def repeat(self, repeats): @forbid_nonstring_types(["bytes"]) def pad( self, - width, + width: int, side: Literal["left", "right", "both"] = "left", fillchar: str = " ", ): @@ -1603,21 +1603,21 @@ def pad( @Appender(_shared_docs["str_pad"] % {"side": "left and right", "method": "center"}) @forbid_nonstring_types(["bytes"]) - def center(self, width, fillchar: str = " "): + def center(self, width: int, fillchar: str = " "): return self.pad(width, side="both", fillchar=fillchar) @Appender(_shared_docs["str_pad"] % {"side": "right", "method": "ljust"}) @forbid_nonstring_types(["bytes"]) - def ljust(self, width, fillchar: str = " "): + def ljust(self, width: int, fillchar: str = " "): return self.pad(width, side="right", fillchar=fillchar) @Appender(_shared_docs["str_pad"] % {"side": "left", "method": "rjust"}) @forbid_nonstring_types(["bytes"]) - def rjust(self, width, fillchar: str = " "): + def rjust(self, width: int, fillchar: str = " "): return self.pad(width, side="left", fillchar=fillchar) @forbid_nonstring_types(["bytes"]) - def zfill(self, width): + def zfill(self, width: int): """ Pad strings in the Series/Index by prepending '0' characters. 
@@ -2041,7 +2041,7 @@ def rstrip(self, to_strip=None): _shared_docs["str_removefix"] % {"side": "prefix", "other_side": "suffix"} ) @forbid_nonstring_types(["bytes"]) - def removeprefix(self, prefix): + def removeprefix(self, prefix: str): result = self._data.array._str_removeprefix(prefix) return self._wrap_result(result) @@ -2049,12 +2049,12 @@ def removeprefix(self, prefix): _shared_docs["str_removefix"] % {"side": "suffix", "other_side": "prefix"} ) @forbid_nonstring_types(["bytes"]) - def removesuffix(self, suffix): + def removesuffix(self, suffix: str): result = self._data.array._str_removesuffix(suffix) return self._wrap_result(result) @forbid_nonstring_types(["bytes"]) - def wrap(self, width, **kwargs): + def wrap(self, width: int, **kwargs): r""" Wrap strings in Series/Index at specified line width. diff --git a/pandas/core/strings/base.py b/pandas/core/strings/base.py index 10d8e94972725..2672d22935d72 100644 --- a/pandas/core/strings/base.py +++ b/pandas/core/strings/base.py @@ -46,7 +46,7 @@ def _str_count(self, pat, flags: int = 0): @abc.abstractmethod def _str_pad( self, - width, + width: int, side: Literal["left", "right", "both"] = "left", fillchar: str = " ", ): @@ -127,15 +127,15 @@ def _str_rindex(self, sub, start: int = 0, end=None): pass @abc.abstractmethod - def _str_join(self, sep): + def _str_join(self, sep: str): pass @abc.abstractmethod - def _str_partition(self, sep, expand): + def _str_partition(self, sep: str, expand): pass @abc.abstractmethod - def _str_rpartition(self, sep, expand): + def _str_rpartition(self, sep: str, expand): pass @abc.abstractmethod @@ -155,7 +155,7 @@ def _str_translate(self, table): pass @abc.abstractmethod - def _str_wrap(self, width, **kwargs): + def _str_wrap(self, width: int, **kwargs): pass @abc.abstractmethod diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index f8e3f0756dfbd..777233d3c55f1 100644 --- a/pandas/core/strings/object_array.py +++ 
b/pandas/core/strings/object_array.py @@ -111,7 +111,7 @@ def _str_count(self, pat, flags: int = 0): def _str_pad( self, - width, + width: int, side: Literal["left", "right", "both"] = "left", fillchar: str = " ", ): @@ -283,14 +283,14 @@ def _str_rindex(self, sub, start: int = 0, end=None): f = lambda x: x.rindex(sub, start, end) return self._str_map(f, dtype="int64") - def _str_join(self, sep): + def _str_join(self, sep: str): return self._str_map(sep.join) - def _str_partition(self, sep, expand): + def _str_partition(self, sep: str, expand): result = self._str_map(lambda x: x.partition(sep), dtype="object") return result - def _str_rpartition(self, sep, expand): + def _str_rpartition(self, sep: str, expand): return self._str_map(lambda x: x.rpartition(sep), dtype="object") def _str_len(self): @@ -362,7 +362,7 @@ def _str_rsplit(self, pat=None, n=-1): def _str_translate(self, table): return self._str_map(lambda x: x.translate(table)) - def _str_wrap(self, width, **kwargs): + def _str_wrap(self, width: int, **kwargs): kwargs["width"] = width tw = textwrap.TextWrapper(**kwargs) return self._str_map(lambda s: "\n".join(tw.wrap(s))) diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 0265b4404d6ab..70c4af2ed7949 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -312,7 +312,7 @@ def _convert_and_box_cache( def _return_parsed_timezone_results( - result: np.ndarray, timezones, utc: bool, name + result: np.ndarray, timezones, utc: bool, name: str ) -> Index: """ Return results from array_strptime if a %z or %Z directive was passed. 
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 98f7b64d2cda0..da35716a5b239 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -450,7 +450,7 @@ def _get_font_size(self, props: Mapping[str, str]) -> float | None: return size return self._pt_to_float(size) - def _select_font_family(self, font_names) -> int | None: + def _select_font_family(self, font_names: Sequence[str]) -> int | None: family = None for name in font_names: family = self.FAMILY_MAP.get(name) diff --git a/pandas/io/html.py b/pandas/io/html.py index ce95c2be8581f..45bbddd72e51f 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -531,7 +531,7 @@ def _expand_colspan_rowspan( return all_texts - def _handle_hidden_tables(self, tbl_list, attr_name): + def _handle_hidden_tables(self, tbl_list, attr_name: str): """ Return list of tables, potentially removing hidden elements diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 588ec639bc2fd..944642bbfe8d3 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -12,6 +12,7 @@ Any, Callable, Generic, + Hashable, Literal, Mapping, TypeVar, @@ -1167,7 +1168,7 @@ def _try_convert_types(self): def _try_convert_data( self, - name, + name: Hashable, data, use_dtypes: bool = True, convert_dates: bool | list[str] = True, diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 9ac31a3e46cd8..35d9d91fb025b 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -927,7 +927,7 @@ def _evaluate_usecols( return {i for i, name in enumerate(names) if usecols(name)} return usecols - def _validate_usecols_names(self, usecols, names): + def _validate_usecols_names(self, usecols, names: Sequence): """ Validates that all usecols are present in a given list of names. 
If not, raise a ValueError that diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index df675a0a3a6cc..2a6c43bff5047 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -489,21 +489,23 @@ class _DeprecationConfig(NamedTuple): @overload -def validate_integer(name, val: None, min_val: int = ...) -> None: +def validate_integer(name: str, val: None, min_val: int = ...) -> None: ... @overload -def validate_integer(name, val: float, min_val: int = ...) -> int: +def validate_integer(name: str, val: float, min_val: int = ...) -> int: ... @overload -def validate_integer(name, val: int | None, min_val: int = ...) -> int | None: +def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None: ... -def validate_integer(name, val: int | float | None, min_val: int = 0) -> int | None: +def validate_integer( + name: str, val: int | float | None, min_val: int = 0 +) -> int | None: """ Checks whether the 'name' parameter for parsing is either an integer OR float that can SAFELY be cast to an integer diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f083ca792c456..bdf469b1f1d38 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -800,7 +800,7 @@ def select( stop=None, columns=None, iterator: bool = False, - chunksize=None, + chunksize: int | None = None, auto_close: bool = False, ): """ @@ -948,7 +948,7 @@ def select_as_multiple( start=None, stop=None, iterator: bool = False, - chunksize=None, + chunksize: int | None = None, auto_close: bool = False, ): """ @@ -1202,7 +1202,7 @@ def append( columns=None, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, - chunksize=None, + chunksize: int | None = None, expectedrows=None, dropna: bool | None = None, data_columns: Literal[True] | list[str] | None = None, @@ -1734,7 +1734,7 @@ def _write_to_group( complevel: int | None = None, fletcher32=None, min_itemsize: int | dict[str, int] | None = None, - chunksize=None, + chunksize: 
int | None = None, expectedrows=None, dropna: bool = False, nan_rep=None, @@ -4271,7 +4271,7 @@ def write( # type: ignore[override] complevel=None, fletcher32=None, min_itemsize=None, - chunksize=None, + chunksize: int | None = None, expectedrows=None, dropna: bool = False, nan_rep=None, diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index f23b18fdcb584..e68f4789f0a06 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -259,7 +259,7 @@ def __init__( filepath_or_buffer: FilePath | ReadBuffer[bytes], index=None, encoding: str | None = "ISO-8859-1", - chunksize=None, + chunksize: int | None = None, compression: CompressionOptions = "infer", ) -> None: self._encoding = encoding @@ -439,7 +439,7 @@ def _record_count(self) -> int: return (total_records_length - tail_pad) // self.record_length - def get_chunk(self, size=None) -> pd.DataFrame: + def get_chunk(self, size: int | None = None) -> pd.DataFrame: """ Reads lines from Xport file and returns as dataframe diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 044fd9806d921..894ab110ef012 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -223,7 +223,7 @@ def execute(sql, con, params=None): @overload def read_sql_table( - table_name, + table_name: str, con, schema=..., index_col: str | list[str] | None = ..., @@ -238,7 +238,7 @@ def read_sql_table( @overload def read_sql_table( - table_name, + table_name: str, con, schema=..., index_col: str | list[str] | None = ..., @@ -1034,7 +1034,7 @@ def _query_iterator( self, result, exit_stack: ExitStack, - chunksize: str | None, + chunksize: int | None, columns, coerce_float: bool = True, parse_dates=None, @@ -1072,7 +1072,7 @@ def read( coerce_float: bool = True, parse_dates=None, columns=None, - chunksize=None, + chunksize: int | None = None, dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", ) -> DataFrame | Iterator[DataFrame]: from sqlalchemy import select @@ -1386,12 +1386,12 @@ def read_query( def to_sql( self, 
frame, - name, + name: str, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool = True, index_label=None, schema=None, - chunksize=None, + chunksize: int | None = None, dtype: DtypeArg | None = None, method=None, engine: str = "auto", @@ -1425,10 +1425,10 @@ def insert_records( table: SQLTable, con, frame, - name, + name: str, index: bool | str | list[str] | None = True, schema=None, - chunksize=None, + chunksize: int | None = None, method=None, **engine_kwargs, ) -> int | None: @@ -1449,10 +1449,10 @@ def insert_records( table: SQLTable, con, frame, - name, + name: str, index: bool | str | list[str] | None = True, schema=None, - chunksize=None, + chunksize: int | None = None, method=None, **engine_kwargs, ) -> int | None: @@ -1770,7 +1770,7 @@ def read_query( def prep_table( self, frame, - name, + name: str, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool | str | list[str] | None = True, index_label=None, @@ -1852,7 +1852,7 @@ def to_sql( index: bool = True, index_label=None, schema: str | None = None, - chunksize=None, + chunksize: int | None = None, dtype: DtypeArg | None = None, method=None, engine: str = "auto", @@ -1998,7 +1998,7 @@ def _create_sql_schema( } -def _get_unicode_name(name): +def _get_unicode_name(name: object): try: uname = str(name).encode("utf-8", "strict").decode("utf-8") except UnicodeError as err: @@ -2006,7 +2006,7 @@ def _get_unicode_name(name): return uname -def _get_valid_sqlite_name(name): +def _get_valid_sqlite_name(name: object): # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\ # -for-sqlite-table-column-names-in-python # Ensure the string can be encoded as UTF-8. 
@@ -2302,12 +2302,12 @@ def _fetchall_as_list(self, cur): def to_sql( self, frame, - name, + name: str, if_exists: str = "fail", index: bool = True, index_label=None, schema=None, - chunksize=None, + chunksize: int | None = None, dtype: DtypeArg | None = None, method=None, engine: str = "auto", diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 119a71ddc6943..75af0c7bdae79 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -500,10 +500,10 @@ def boxplot_frame( column=None, by=None, ax=None, - fontsize=None, + fontsize: int | None = None, rot: int = 0, grid: bool = True, - figsize=None, + figsize: tuple[float, float] | None = None, layout=None, return_type=None, backend=None, @@ -529,11 +529,11 @@ def boxplot_frame_groupby( grouped, subplots: bool = True, column=None, - fontsize=None, + fontsize: int | None = None, rot: int = 0, grid: bool = True, ax=None, - figsize=None, + figsize: tuple[float, float] | None = None, layout=None, sharex: bool = False, sharey: bool = True, @@ -797,7 +797,7 @@ def __init__(self, data) -> None: self._parent = data @staticmethod - def _get_call_args(backend_name, data, args, kwargs): + def _get_call_args(backend_name: str, data, args, kwargs): """ This function makes calls to this accessor `__call__` method compatible with the previous `SeriesPlotMethods.__call__` and diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index b39fc93f4f024..d15da170682d3 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -210,7 +210,7 @@ def _make_plot(self) -> None: labels = [pprint_thing(key) for key in range(len(labels))] self._set_ticklabels(ax, labels) - def _set_ticklabels(self, ax: Axes, labels) -> None: + def _set_ticklabels(self, ax: Axes, labels: list[str]) -> None: if self.orientation == "vertical": ax.set_xticklabels(labels) else: @@ -248,7 +248,7 @@ def _grouped_plot_by_column( by=None, numeric_only: bool = True, 
grid: bool = False, - figsize=None, + figsize: tuple[float, float] | None = None, ax=None, layout=None, return_type=None, @@ -307,10 +307,10 @@ def boxplot( column=None, by=None, ax=None, - fontsize=None, + fontsize: int | None = None, rot: int = 0, grid: bool = True, - figsize=None, + figsize: tuple[float, float] | None = None, layout=None, return_type=None, **kwds, @@ -456,10 +456,10 @@ def boxplot_frame( column=None, by=None, ax=None, - fontsize=None, + fontsize: int | None = None, rot: int = 0, grid: bool = True, - figsize=None, + figsize: tuple[float, float] | None = None, layout=None, return_type=None, **kwds, @@ -487,11 +487,11 @@ def boxplot_frame_groupby( grouped, subplots: bool = True, column=None, - fontsize=None, + fontsize: int | None = None, rot: int = 0, grid: bool = True, ax=None, - figsize=None, + figsize: tuple[float, float] | None = None, layout=None, sharex: bool = False, sharey: bool = True, diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 83e3ea8905e1a..cfea83a7740fe 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -127,7 +127,7 @@ def __init__( sharex=None, sharey: bool = False, use_index: bool = True, - figsize=None, + figsize: tuple[float, float] | None = None, grid=None, legend: bool | str = True, rot=None, @@ -140,7 +140,7 @@ def __init__( yticks=None, xlabel: Hashable | None = None, ylabel: Hashable | None = None, - fontsize=None, + fontsize: int | None = None, secondary_y: bool | tuple | list | np.ndarray = False, colormap=None, table: bool = False, @@ -729,7 +729,9 @@ def _adorn_subplots(self): raise ValueError(msg) self.axes[0].set_title(self.title) - def _apply_axis_properties(self, axis: Axis, rot=None, fontsize=None) -> None: + def _apply_axis_properties( + self, axis: Axis, rot=None, fontsize: int | None = None + ) -> None: """ Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are 
created/destroyed @@ -958,7 +960,7 @@ def on_right(self, i): if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndex)): return self.data.columns[i] in self.secondary_y - def _apply_style_colors(self, colors, kwds, col_num, label): + def _apply_style_colors(self, colors, kwds, col_num, label: str): """ Manage style and color based on column number and its label. Returns tuple of appropriate style and kwds which "color" may be added. diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index 710c20db0526e..076b95a885d5e 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -269,7 +269,7 @@ def _grouped_plot( column=None, by=None, numeric_only: bool = True, - figsize=None, + figsize: tuple[float, float] | None = None, sharex: bool = True, sharey: bool = True, layout=None, @@ -277,7 +277,9 @@ def _grouped_plot( ax=None, **kwargs, ): - if figsize == "default": + # error: Non-overlapping equality check (left operand type: "Optional[Tuple[float, + # float]]", right operand type: "Literal['default']") + if figsize == "default": # type: ignore[comparison-overlap] # allowed to specify mpl default with 'default' raise ValueError( "figsize='default' is no longer supported. 
" @@ -311,15 +313,15 @@ def _grouped_hist( by=None, ax=None, bins: int = 50, - figsize=None, + figsize: tuple[float, float] | None = None, layout=None, sharex: bool = False, sharey: bool = False, rot: float = 90, grid: bool = True, - xlabelsize=None, + xlabelsize: int | None = None, xrot=None, - ylabelsize=None, + ylabelsize: int | None = None, yrot=None, legend: bool = False, **kwargs, @@ -392,11 +394,11 @@ def hist_series( by=None, ax=None, grid: bool = True, - xlabelsize=None, + xlabelsize: int | None = None, xrot=None, - ylabelsize=None, + ylabelsize: int | None = None, yrot=None, - figsize=None, + figsize: tuple[float, float] | None = None, bins: int = 10, legend: bool = False, **kwds, @@ -464,14 +466,14 @@ def hist_frame( column=None, by=None, grid: bool = True, - xlabelsize=None, + xlabelsize: int | None = None, xrot=None, - ylabelsize=None, + ylabelsize: int | None = None, yrot=None, ax=None, sharex: bool = False, sharey: bool = False, - figsize=None, + figsize: tuple[float, float] | None = None, layout=None, bins: int = 10, legend: bool = False, diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py index 291a6dff9650d..7db9acdc68d51 100644 --- a/pandas/plotting/_matplotlib/misc.py +++ b/pandas/plotting/_matplotlib/misc.py @@ -35,7 +35,7 @@ def scatter_matrix( frame: DataFrame, alpha: float = 0.5, - figsize=None, + figsize: tuple[float, float] | None = None, ax=None, grid: bool = False, diagonal: str = "hist", diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 7d3c857eea2dd..414a20cde62b6 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -443,9 +443,9 @@ def flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray: def set_ticks_props( axes: Axes | Sequence[Axes], - xlabelsize=None, + xlabelsize: int | None = None, xrot=None, - ylabelsize=None, + ylabelsize: int | None = None, yrot=None, ): import matplotlib.pyplot as plt diff --git 
a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 9f1e166cd6afb..34812aa491cd3 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -151,7 +151,7 @@ class Holiday: def __init__( self, - name, + name: str, year=None, month=None, day=None, @@ -366,7 +366,7 @@ def register(cls) -> None: holiday_calendars[name] = cls -def get_calendar(name): +def get_calendar(name: str): """ Return an instance of a calendar based on its name. @@ -379,7 +379,7 @@ def get_calendar(name): class HolidayCalendarMetaClass(type): - def __new__(cls, clsname, bases, attrs): + def __new__(cls, clsname: str, bases, attrs): calendar_class = super().__new__(cls, clsname, bases, attrs) register(calendar_class) return calendar_class @@ -395,7 +395,7 @@ class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass): end_date = Timestamp(datetime(2200, 12, 31)) _cache = None - def __init__(self, name=None, rules=None) -> None: + def __init__(self, name: str = "", rules=None) -> None: """ Initializes holiday object with a given set a rules. Normally classes just have the rules defined within them. @@ -408,14 +408,14 @@ def __init__(self, name=None, rules=None) -> None: A set of rules used to create the holidays. 
""" super().__init__() - if name is None: + if not name: name = type(self).__name__ self.name = name if rules is not None: self.rules = rules - def rule_from_name(self, name): + def rule_from_name(self, name: str): for rule in self.rules: if rule.name == name: return rule @@ -579,7 +579,7 @@ class USFederalHolidayCalendar(AbstractHolidayCalendar): ] -def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar): +def HolidayCalendarFactory(name: str, base, other, base_class=AbstractHolidayCalendar): rules = AbstractHolidayCalendar.merge_class(base, other) calendar_class = type(name, (base_class,), {"rules": rules, "name": name}) return calendar_class diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index f03d1ceb507fd..2af68dc3d6df0 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -222,7 +222,10 @@ def validate_args_and_kwargs( def validate_bool_kwarg( - value: BoolishNoneT, arg_name, none_allowed: bool = True, int_allowed: bool = False + value: BoolishNoneT, + arg_name: str, + none_allowed: bool = True, + int_allowed: bool = False, ) -> BoolishNoneT: """ Ensure that argument passed in arg_name can be interpreted as boolean. diff --git a/scripts/run_autotyping.py b/scripts/run_autotyping.py index 0a1156399734d..4c0a3a9cf985f 100644 --- a/scripts/run_autotyping.py +++ b/scripts/run_autotyping.py @@ -26,8 +26,15 @@ def main(argv: Sequence[str] | None = None) -> None: "codemod", "autotyping.AutotypeCommand", *args.paths, - "--aggressive", "--no-format", + "--safe", + # all except 'guess-common-names' from 'aggresive' + "--bool-param", + "--int-param", + "--float-param", + "--str-param", + "--bytes-param", + "--annotate-imprecise-magics", ], check=True, )
Added many of the `--guess-common-names` guestimations. This new option would still make changes, so I disabled it.
https://api.github.com/repos/pandas-dev/pandas/pulls/52232
2023-03-27T03:06:35Z
2023-03-27T08:56:49Z
2023-03-27T08:56:49Z
2023-08-09T15:08:36Z
DataFrame transform with fillna test GH#26840
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index d7dc2d8937467..c1201c33123ab 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -361,6 +361,20 @@ def test_dispatch_transform(tsframe): tm.assert_frame_equal(filled, expected) +def test_transform_fillna_null(): + df = DataFrame( + dict( + price=[10, 10, 20, 20, 30, 30], + color=[10, 10, 20, 20, 30, 30], + cost=(100, 200, 300, 400, 500, 600), + ) + ) + with pytest.raises(ValueError, match="Must specify a fill 'value' or 'method'"): + df.groupby(["price"]).transform("fillna") + with pytest.raises(ValueError, match="Must specify a fill 'value' or 'method'"): + df.groupby(["price"]).fillna() + + def test_transform_transformation_func(transformation_func): # GH 30918 df = DataFrame(
- [ ] closes #26840 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/52230
2023-03-26T23:56:47Z
2023-03-27T18:04:29Z
2023-03-27T18:04:29Z
2023-03-28T00:44:17Z
REF: de-duplicate some test code
diff --git a/pandas/tests/extension/masked_shared.py b/pandas/tests/extension/masked_shared.py new file mode 100644 index 0000000000000..4c6ce20379419 --- /dev/null +++ b/pandas/tests/extension/masked_shared.py @@ -0,0 +1,121 @@ +""" +Shared test code for IntegerArray/FloatingArray/BooleanArray. +""" +import pytest + +from pandas.compat import ( + IS64, + is_platform_windows, +) + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension import base + + +class Arithmetic(base.BaseArithmeticOpsTests): + def check_opname(self, ser: pd.Series, op_name: str, other, exc=None): + # overwriting to indicate ops don't raise an error + super().check_opname(ser, op_name, other, exc=None) + + def _check_divmod_op(self, ser: pd.Series, op, other, exc=None): + super()._check_divmod_op(ser, op, other, None) + + +class Comparison(base.BaseComparisonOpsTests): + def _check_op( + self, ser: pd.Series, op, other, op_name: str, exc=NotImplementedError + ): + if exc is None: + result = op(ser, other) + # Override to do the astype to boolean + expected = ser.combine(other, op).astype("boolean") + self.assert_series_equal(result, expected) + else: + with pytest.raises(exc): + op(ser, other) + + def check_opname(self, ser: pd.Series, op_name: str, other, exc=None): + super().check_opname(ser, op_name, other, exc=None) + + def _compare_other(self, ser: pd.Series, data, op, other): + op_name = f"__{op.__name__}__" + self.check_opname(ser, op_name, other) + + +class NumericReduce(base.BaseNumericReduceTests): + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + # overwrite to ensure pd.NA is tested instead of np.nan + # https://github.com/pandas-dev/pandas/issues/30958 + + cmp_dtype = "int64" + if ser.dtype.kind == "f": + # Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has + # no attribute "numpy_dtype" + cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr] + + if op_name == "count": + result = getattr(ser, op_name)() + expected = 
getattr(ser.dropna().astype(cmp_dtype), op_name)() + else: + result = getattr(ser, op_name)(skipna=skipna) + expected = getattr(ser.dropna().astype(cmp_dtype), op_name)(skipna=skipna) + if not skipna and ser.isna().any(): + expected = pd.NA + tm.assert_almost_equal(result, expected) + + +class Accumulation(base.BaseAccumulateTests): + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna): + pass + + def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool): + # overwrite to ensure pd.NA is tested instead of np.nan + # https://github.com/pandas-dev/pandas/issues/30958 + length = 64 + if not IS64 or is_platform_windows(): + # Item "ExtensionDtype" of "Union[dtype[Any], ExtensionDtype]" has + # no attribute "itemsize" + if not ser.dtype.itemsize == 8: # type: ignore[union-attr] + length = 32 + + if ser.dtype.name.startswith("U"): + expected_dtype = f"UInt{length}" + elif ser.dtype.name.startswith("I"): + expected_dtype = f"Int{length}" + elif ser.dtype.name.startswith("F"): + # Incompatible types in assignment (expression has type + # "Union[dtype[Any], ExtensionDtype]", variable has type "str") + expected_dtype = ser.dtype # type: ignore[assignment] + + if op_name == "cumsum": + result = getattr(ser, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser.astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name in ["cummax", "cummin"]: + result = getattr(ser, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser.astype("float64"), op_name)(skipna=skipna), + dtype=ser.dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name == "cumprod": + result = getattr(ser[:12], op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser[:12].astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + 
tm.assert_series_equal(result, expected) + + else: + raise NotImplementedError(f"{op_name} not supported") diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py index 60c78b46a4832..5ac90bf17ddc9 100644 --- a/pandas/tests/extension/test_floating.py +++ b/pandas/tests/extension/test_floating.py @@ -25,7 +25,10 @@ Float32Dtype, Float64Dtype, ) -from pandas.tests.extension import base +from pandas.tests.extension import ( + base, + masked_shared, +) def make_data(): @@ -92,11 +95,7 @@ class TestDtype(base.BaseDtypeTests): pass -class TestArithmeticOps(base.BaseArithmeticOpsTests): - def check_opname(self, s, op_name, other, exc=None): - # overwriting to indicate ops don't raise an error - super().check_opname(s, op_name, other, exc=None) - +class TestArithmeticOps(masked_shared.Arithmetic): def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: sdtype = tm.get_dtype(s) @@ -120,28 +119,9 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError): with pytest.raises(exc): op(s, other) - def _check_divmod_op(self, s, op, other, exc=None): - super()._check_divmod_op(s, op, other, None) - -class TestComparisonOps(base.BaseComparisonOpsTests): - # TODO: share with IntegerArray? 
- def _check_op(self, s, op, other, op_name, exc=NotImplementedError): - if exc is None: - result = op(s, other) - # Override to do the astype to boolean - expected = s.combine(other, op).astype("boolean") - self.assert_series_equal(result, expected) - else: - with pytest.raises(exc): - op(s, other) - - def check_opname(self, s, op_name, other, exc=None): - super().check_opname(s, op_name, other, exc=None) - - def _compare_other(self, s, data, op, other): - op_name = f"__{op.__name__}__" - self.check_opname(s, op_name, other) +class TestComparisonOps(masked_shared.Comparison): + pass class TestInterface(base.BaseInterfaceTests): @@ -184,21 +164,8 @@ class TestGroupby(base.BaseGroupbyTests): pass -class TestNumericReduce(base.BaseNumericReduceTests): - def check_reduce(self, s, op_name, skipna): - # overwrite to ensure pd.NA is tested instead of np.nan - # https://github.com/pandas-dev/pandas/issues/30958 - if op_name == "count": - result = getattr(s, op_name)() - expected = getattr(s.dropna().astype(s.dtype.numpy_dtype), op_name)() - else: - result = getattr(s, op_name)(skipna=skipna) - expected = getattr(s.dropna().astype(s.dtype.numpy_dtype), op_name)( - skipna=skipna - ) - if not skipna and s.isna().any(): - expected = pd.NA - tm.assert_almost_equal(result, expected) +class TestNumericReduce(masked_shared.NumericReduce): + pass @pytest.mark.skip(reason="Tested in tests/reductions/test_reductions.py") @@ -219,7 +186,5 @@ class Test2DCompat(base.Dim2CompatTests): pass -class TestAccumulation(base.BaseAccumulateTests): - @pytest.mark.parametrize("skipna", [True, False]) - def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna): - pass +class TestAccumulation(masked_shared.Accumulation): + pass diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index 936764c3627d0..c638977b959a7 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -16,11 +16,6 @@ import 
numpy as np import pytest -from pandas.compat import ( - IS64, - is_platform_windows, -) - import pandas as pd import pandas._testing as tm from pandas.api.types import ( @@ -37,7 +32,10 @@ UInt32Dtype, UInt64Dtype, ) -from pandas.tests.extension import base +from pandas.tests.extension import ( + base, + masked_shared, +) def make_data(): @@ -109,11 +107,7 @@ class TestDtype(base.BaseDtypeTests): pass -class TestArithmeticOps(base.BaseArithmeticOpsTests): - def check_opname(self, s, op_name, other, exc=None): - # overwriting to indicate ops don't raise an error - super().check_opname(s, op_name, other, exc=None) - +class TestArithmeticOps(masked_shared.Arithmetic): def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: sdtype = tm.get_dtype(s) @@ -145,27 +139,9 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError): with pytest.raises(exc): op(s, other) - def _check_divmod_op(self, s, op, other, exc=None): - super()._check_divmod_op(s, op, other, None) - - -class TestComparisonOps(base.BaseComparisonOpsTests): - def _check_op(self, s, op, other, op_name, exc=NotImplementedError): - if exc is None: - result = op(s, other) - # Override to do the astype to boolean - expected = s.combine(other, op).astype("boolean") - self.assert_series_equal(result, expected) - else: - with pytest.raises(exc): - op(s, other) - - def check_opname(self, s, op_name, other, exc=None): - super().check_opname(s, op_name, other, exc=None) - def _compare_other(self, s, data, op, other): - op_name = f"__{op.__name__}__" - self.check_opname(s, op_name, other) +class TestComparisonOps(masked_shared.Comparison): + pass class TestInterface(base.BaseInterfaceTests): @@ -212,19 +188,8 @@ class TestGroupby(base.BaseGroupbyTests): pass -class TestNumericReduce(base.BaseNumericReduceTests): - def check_reduce(self, s, op_name, skipna): - # overwrite to ensure pd.NA is tested instead of np.nan - # https://github.com/pandas-dev/pandas/issues/30958 - if 
op_name == "count": - result = getattr(s, op_name)() - expected = getattr(s.dropna().astype("int64"), op_name)() - else: - result = getattr(s, op_name)(skipna=skipna) - expected = getattr(s.dropna().astype("int64"), op_name)(skipna=skipna) - if not skipna and s.isna().any(): - expected = pd.NA - tm.assert_almost_equal(result, expected) +class TestNumericReduce(masked_shared.NumericReduce): + pass @pytest.mark.skip(reason="Tested in tests/reductions/test_reductions.py") @@ -232,54 +197,8 @@ class TestBooleanReduce(base.BaseBooleanReduceTests): pass -class TestAccumulation(base.BaseAccumulateTests): - def check_accumulate(self, s, op_name, skipna): - # overwrite to ensure pd.NA is tested instead of np.nan - # https://github.com/pandas-dev/pandas/issues/30958 - length = 64 - if not IS64 or is_platform_windows(): - if not s.dtype.itemsize == 8: - length = 32 - - if s.dtype.name.startswith("U"): - expected_dtype = f"UInt{length}" - else: - expected_dtype = f"Int{length}" - - if op_name == "cumsum": - result = getattr(s, op_name)(skipna=skipna) - expected = pd.Series( - pd.array( - getattr(s.astype("float64"), op_name)(skipna=skipna), - dtype=expected_dtype, - ) - ) - tm.assert_series_equal(result, expected) - elif op_name in ["cummax", "cummin"]: - result = getattr(s, op_name)(skipna=skipna) - expected = pd.Series( - pd.array( - getattr(s.astype("float64"), op_name)(skipna=skipna), - dtype=s.dtype, - ) - ) - tm.assert_series_equal(result, expected) - elif op_name == "cumprod": - result = getattr(s[:12], op_name)(skipna=skipna) - expected = pd.Series( - pd.array( - getattr(s[:12].astype("float64"), op_name)(skipna=skipna), - dtype=expected_dtype, - ) - ) - tm.assert_series_equal(result, expected) - - else: - raise NotImplementedError(f"{op_name} not supported") - - @pytest.mark.parametrize("skipna", [True, False]) - def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna): - pass +class TestAccumulation(masked_shared.Accumulation): + pass class 
TestPrinting(base.BasePrintingTests): diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index 06170d2241f01..a81e013290b64 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -106,95 +106,64 @@ def test_ndframe_getitem_caching_issue(self, request, using_copy_on_write): # Series ( [ - pd.Series(1, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.Series(2, index=["c", "d"]).set_flags( - allows_duplicate_labels=False - ), + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["c", "d"]), ], {}, ), ( [ - pd.Series(1, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.Series(2, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["a", "b"]), ], {"ignore_index": True}, ), ( [ - pd.Series(1, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.Series(2, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["a", "b"]), ], {"axis": 1}, ), # Frame ( [ - pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.DataFrame({"A": [1, 2]}, index=["c", "d"]).set_flags( - allows_duplicate_labels=False - ), + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"A": [1, 2]}, index=["c", "d"]), ], {}, ), ( [ - pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), ], {"ignore_index": True}, ), ( [ - pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.DataFrame({"B": [1, 2]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), + 
pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"B": [1, 2]}, index=["a", "b"]), ], {"axis": 1}, ), # Series / Frame ( [ - pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.Series( - [1, 2], - index=["a", "b"], - name="B", - ).set_flags( - allows_duplicate_labels=False, - ), + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.Series([1, 2], index=["a", "b"], name="B"), ], {"axis": 1}, ), ], ) def test_concat(self, objs, kwargs): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] result = pd.concat(objs, **kwargs) assert result.flags.allows_duplicate_labels is False @pytest.mark.parametrize( - "left, right, kwargs, expected", + "left, right, expected", [ # false false false pytest.param( @@ -204,7 +173,6 @@ def test_concat(self, objs, kwargs): pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags( allows_duplicate_labels=False ), - {"left_index": True, "right_index": True}, False, marks=not_implemented, ), @@ -214,7 +182,6 @@ def test_concat(self, objs, kwargs): allows_duplicate_labels=False ), pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), - {"left_index": True, "right_index": True}, False, marks=not_implemented, ), @@ -222,13 +189,12 @@ def test_concat(self, objs, kwargs): ( pd.DataFrame({"A": [0, 1]}, index=["a", "b"]), pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), - {"left_index": True, "right_index": True}, True, ), ], ) - def test_merge(self, left, right, kwargs, expected): - result = pd.merge(left, right, **kwargs) + def test_merge(self, left, right, expected): + result = pd.merge(left, right, left_index=True, right_index=True) assert result.flags.allows_duplicate_labels is expected @not_implemented @@ -335,18 +301,15 @@ def test_getitem_raises(self, getter, target): [ ( [ - pd.Series(1, index=[0, 1], name="a").set_flags( - allows_duplicate_labels=False - ), - pd.Series(2, index=[0, 1], name="a").set_flags( - allows_duplicate_labels=False - ), + pd.Series(1, index=[0, 1], 
name="a"), + pd.Series(2, index=[0, 1], name="a"), ], {"axis": 1}, ) ], ) def test_concat_raises(self, objs, kwargs): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.concat(objs, **kwargs) diff --git a/pandas/tests/groupby/test_raises.py b/pandas/tests/groupby/test_raises.py index bd3686354e432..c1403fc68c25c 100644 --- a/pandas/tests/groupby/test_raises.py +++ b/pandas/tests/groupby/test_raises.py @@ -40,8 +40,8 @@ def groupby_series(request): return request.param -@pytest.mark.parametrize("how", ["method", "agg", "transform"]) -def test_groupby_raises_string(how, by, groupby_series, groupby_func): +@pytest.fixture +def df_with_string_col(): df = DataFrame( { "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], @@ -50,6 +50,62 @@ def test_groupby_raises_string(how, by, groupby_series, groupby_func): "d": list("xyzwtyuio"), } ) + return df + + +@pytest.fixture +def df_with_datetime_col(): + df = DataFrame( + { + "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], + "c": range(9), + "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000), + } + ) + return df + + +@pytest.fixture +def df_with_cat_col(): + df = DataFrame( + { + "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], + "c": range(9), + "d": Categorical( + ["a", "a", "a", "a", "b", "b", "b", "b", "c"], + categories=["a", "b", "c", "d"], + ordered=True, + ), + } + ) + return df + + +def _call_and_check(klass, msg, how, gb, groupby_func, args): + if klass is None: + if how == "method": + getattr(gb, groupby_func)(*args) + elif how == "agg": + gb.agg(groupby_func, *args) + else: + gb.transform(groupby_func, *args) + else: + with pytest.raises(klass, match=msg): + if how == "method": + getattr(gb, groupby_func)(*args) + elif how == "agg": + gb.agg(groupby_func, *args) + else: + gb.transform(groupby_func, *args) + + +@pytest.mark.parametrize("how", ["method", "agg", "transform"]) 
+def test_groupby_raises_string( + how, by, groupby_series, groupby_func, df_with_string_col +): + df = df_with_string_col args = get_groupby_method_args(groupby_func, df) gb = df.groupby(by=by) @@ -109,33 +165,12 @@ def test_groupby_raises_string(how, by, groupby_series, groupby_func): "var": (TypeError, "could not convert string to float"), }[groupby_func] - if klass is None: - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) - else: - with pytest.raises(klass, match=msg): - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) + _call_and_check(klass, msg, how, gb, groupby_func, args) @pytest.mark.parametrize("how", ["agg", "transform"]) -def test_groupby_raises_string_udf(how, by, groupby_series): - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": list("xyzwtyuio"), - } - ) +def test_groupby_raises_string_udf(how, by, groupby_series, df_with_string_col): + df = df_with_string_col gb = df.groupby(by=by) if groupby_series: @@ -150,16 +185,11 @@ def func(x): @pytest.mark.parametrize("how", ["agg", "transform"]) @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean]) -def test_groupby_raises_string_np(how, by, groupby_series, groupby_func_np): +def test_groupby_raises_string_np( + how, by, groupby_series, groupby_func_np, df_with_string_col +): # GH#50749 - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": list("xyzwtyuio"), - } - ) + df = df_with_string_col gb = df.groupby(by=by) if groupby_series: @@ -170,23 +200,14 @@ def test_groupby_raises_string_np(how, by, groupby_series, groupby_func_np): np.mean: (TypeError, "Could not convert xy?z?w?t?y?u?i?o? 
to numeric"), }[groupby_func_np] - if klass is None: - getattr(gb, how)(groupby_func_np) - else: - with pytest.raises(klass, match=msg): - getattr(gb, how)(groupby_func_np) + _call_and_check(klass, msg, how, gb, groupby_func_np, tuple()) @pytest.mark.parametrize("how", ["method", "agg", "transform"]) -def test_groupby_raises_datetime(how, by, groupby_series, groupby_func): - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000), - } - ) +def test_groupby_raises_datetime( + how, by, groupby_series, groupby_func, df_with_datetime_col +): + df = df_with_datetime_col args = get_groupby_method_args(groupby_func, df) gb = df.groupby(by=by) @@ -234,41 +255,18 @@ def test_groupby_raises_datetime(how, by, groupby_series, groupby_func): "var": (TypeError, "datetime64 type does not support var operations"), }[groupby_func] - if klass is None: - warn = None - warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated" - if groupby_func in ["any", "all"]: - warn = FutureWarning - - with tm.assert_produces_warning(warn, match=warn_msg): - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) + warn = None + warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated" + if groupby_func in ["any", "all"]: + warn = FutureWarning - else: - with pytest.raises(klass, match=msg): - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) + with tm.assert_produces_warning(warn, match=warn_msg): + _call_and_check(klass, msg, how, gb, groupby_func, args) @pytest.mark.parametrize("how", ["agg", "transform"]) -def test_groupby_raises_datetime_udf(how, by, groupby_series): - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), 
- "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000), - } - ) - +def test_groupby_raises_datetime_udf(how, by, groupby_series, df_with_datetime_col): + df = df_with_datetime_col gb = df.groupby(by=by) if groupby_series: @@ -283,16 +281,11 @@ def func(x): @pytest.mark.parametrize("how", ["agg", "transform"]) @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean]) -def test_groupby_raises_datetime_np(how, by, groupby_series, groupby_func_np): +def test_groupby_raises_datetime_np( + how, by, groupby_series, groupby_func_np, df_with_datetime_col +): # GH#50749 - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000), - } - ) + df = df_with_datetime_col gb = df.groupby(by=by) if groupby_series: @@ -303,30 +296,15 @@ def test_groupby_raises_datetime_np(how, by, groupby_series, groupby_func_np): np.mean: (None, ""), }[groupby_func_np] - if klass is None: - getattr(gb, how)(groupby_func_np) - else: - with pytest.raises(klass, match=msg): - getattr(gb, how)(groupby_func_np) + _call_and_check(klass, msg, how, gb, groupby_func_np, tuple()) @pytest.mark.parametrize("how", ["method", "agg", "transform"]) def test_groupby_raises_category( - how, by, groupby_series, groupby_func, using_copy_on_write + how, by, groupby_series, groupby_func, using_copy_on_write, df_with_cat_col ): # GH#50749 - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": Categorical( - ["a", "a", "a", "a", "b", "b", "b", "b", "c"], - categories=["a", "b", "c", "d"], - ordered=True, - ), - } - ) + df = df_with_cat_col args = get_groupby_method_args(groupby_func, df) gb = df.groupby(by=by) @@ -452,38 +430,13 @@ def test_groupby_raises_category( ), }[groupby_func] - if klass is None: - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, 
*args) - else: - with pytest.raises(klass, match=msg): - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) + _call_and_check(klass, msg, how, gb, groupby_func, args) @pytest.mark.parametrize("how", ["agg", "transform"]) -def test_groupby_raises_category_udf(how, by, groupby_series): +def test_groupby_raises_category_udf(how, by, groupby_series, df_with_cat_col): # GH#50749 - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": Categorical( - ["a", "a", "a", "a", "b", "b", "b", "b", "c"], - categories=["a", "b", "c", "d"], - ordered=True, - ), - } - ) + df = df_with_cat_col gb = df.groupby(by=by) if groupby_series: @@ -498,20 +451,11 @@ def func(x): @pytest.mark.parametrize("how", ["agg", "transform"]) @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean]) -def test_groupby_raises_category_np(how, by, groupby_series, groupby_func_np): +def test_groupby_raises_category_np( + how, by, groupby_series, groupby_func_np, df_with_cat_col +): # GH#50749 - df = DataFrame( - { - "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": Categorical( - ["a", "a", "a", "a", "b", "b", "b", "b", "c"], - categories=["a", "b", "c", "d"], - ordered=True, - ), - } - ) + df = df_with_cat_col gb = df.groupby(by=by) if groupby_series: @@ -525,33 +469,25 @@ def test_groupby_raises_category_np(how, by, groupby_series, groupby_func_np): ), }[groupby_func_np] - if klass is None: - getattr(gb, how)(groupby_func_np) - else: - with pytest.raises(klass, match=msg): - getattr(gb, how)(groupby_func_np) + _call_and_check(klass, msg, how, gb, groupby_func_np, tuple()) @pytest.mark.parametrize("how", ["method", "agg", "transform"]) def test_groupby_raises_category_on_category( - how, by, groupby_series, groupby_func, observed, using_copy_on_write + how, + by, + groupby_series, + groupby_func, + 
observed, + using_copy_on_write, + df_with_cat_col, ): # GH#50749 - df = DataFrame( - { - "a": Categorical( - ["a", "a", "a", "a", "b", "b", "b", "b", "c"], - categories=["a", "b", "c", "d"], - ordered=True, - ), - "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], - "c": range(9), - "d": Categorical( - ["a", "a", "a", "a", "b", "b", "c", "c", "c"], - categories=["a", "b", "c", "d"], - ordered=True, - ), - } + df = df_with_cat_col + df["a"] = Categorical( + ["a", "a", "a", "a", "b", "b", "b", "b", "c"], + categories=["a", "b", "c", "d"], + ordered=True, ) args = get_groupby_method_args(groupby_func, df) gb = df.groupby(by=by, observed=observed) @@ -662,21 +598,7 @@ def test_groupby_raises_category_on_category( ), }[groupby_func] - if klass is None: - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) - else: - with pytest.raises(klass, match=msg): - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) + _call_and_check(klass, msg, how, gb, groupby_func, args) def test_subsetting_columns_axis_1_raises():
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52228
2023-03-26T22:11:09Z
2023-03-27T18:09:20Z
2023-03-27T18:09:19Z
2023-03-27T18:45:35Z
DOC: reshaping.rst Update
diff --git a/doc/source/_static/reshaping_pivot.png b/doc/source/_static/reshaping_pivot.png index c6c37a80744d4..6d779562adcac 100644 Binary files a/doc/source/_static/reshaping_pivot.png and b/doc/source/_static/reshaping_pivot.png differ diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index 6a34998ccd0a6..237ea1a4dd9c6 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -13,7 +13,7 @@ Reshaping by pivoting DataFrame objects .. image:: ../_static/reshaping_pivot.png -Data is often stored in so-called "stacked" or "record" format: +Data is often stored in so-called "stacked" or "record" format. In a "record" or "wide" format typically there is one row for each subject. In the "stacked" or "long" format there are multiple rows for each subject where applicable. .. ipython:: python
- [x] closes #52142 - [x] Modified an entry in the latest `doc/source/user_guide/reshaping.rst` to include synonymous terms and explination - [x] Modified image `doc/source/static/reshaping_pivot.png
https://api.github.com/repos/pandas-dev/pandas/pulls/52227
2023-03-26T19:32:12Z
2023-03-27T18:11:22Z
2023-03-27T18:11:22Z
2023-03-27T18:11:31Z
CI: replace flake8-pyi with ruff
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 02acba4804eb3..d4baa638bdda2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -28,7 +28,7 @@ repos: types_or: [python, pyi] additional_dependencies: [black==23.1.0] - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.255 + rev: v0.0.259 hooks: - id: ruff args: [--exit-non-zero-on-fix] @@ -392,14 +392,6 @@ repos: files: ^pandas/ exclude: ^(pandas/_libs/|pandas/tests/|pandas/errors/__init__.py$|pandas/_version.py) types: [python] - - id: flake8-pyi - name: flake8-pyi - entry: flake8 --extend-ignore=E301,E302,E305,E701,E704 - types: [pyi] - language: python - additional_dependencies: - - flake8==5.0.4 - - flake8-pyi==22.8.1 - id: future-annotations name: import annotations from __future__ entry: 'from __future__ import annotations' diff --git a/pyproject.toml b/pyproject.toml index da831dc9f8bd4..2aadfd7bd41ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -215,6 +215,8 @@ select = [ "PLE", "PLR", "PLW", # misc lints "PIE", + # flake8-pyi + "PYI", # tidy imports "TID", # implicit string concatenation @@ -266,6 +268,14 @@ ignore = [ "PLR0915", # Global statements are discouraged "PLW0603", + # Docstrings should not be included in stubs + "PYI021", + # Use typing_extensions.TypeAlias for type aliases + # "PYI026", # not yet implemented + # Use "collections.abc.*" instead of "typing.*" (PEP 585 syntax) + # "PYI027", # not yet implemented + # while int | float can be shortened to float, the former is more explicit + # "PYI041", # not yet implemented # Additional checks that don't pass yet # Within an except clause, raise exceptions with ... 
@@ -281,6 +291,8 @@ exclude = [ "doc/build/*.py", "doc/temp/*.py", ".eggs/*.py", + # vendored files + "pandas/util/version/*", "versioneer.py", # exclude asv benchmark environments from linting "env", @@ -292,8 +304,9 @@ exclude = [ # to be enabled gradually "pandas/core/*" = ["PLR5501", "PLW2901"] "pandas/io/*" = ["PLW2901"] -"pandas/tests/*" = ["PLW2901"] +"pandas/tests/*" = ["B028", "PLW2901"] "pandas/plotting/*" = ["PLW2901"] +"scripts/*" = ["B028"] # Keep this one enabled "pandas/_typing.py" = ["TCH"] diff --git a/setup.cfg b/setup.cfg index f27daa56cbfc6..c269237f97211 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # Although ruff is now the main linter for style checks, this section -# is still needed for validate_docstrings.py and flake8-pyi +# is still needed for validate_docstrings.py ignore = # space before : (needed for how black formats slicing) E203, @@ -12,17 +12,7 @@ ignore = # module level import not at top of file E402, # do not assign a lambda expression, use a def - E731, - # found modulo formatter (incorrect picks up mod operations) - Y002, - # Docstrings should not be included in stubs - Y021, - # Use typing_extensions.TypeAlias for type aliases - Y026, - # Use "collections.abc.*" instead of "typing.*" (PEP 585 syntax) - Y027, - # while int | float can be shortened to float, the former is more explicit - Y041 + E731 exclude = doc/sphinxext/*.py, doc/build/*.py,
Not all rules are covered by ruff but, from my point of view, the important ones are covered.
https://api.github.com/repos/pandas-dev/pandas/pulls/52226
2023-03-26T17:26:31Z
2023-03-26T19:34:59Z
2023-03-26T19:34:59Z
2023-08-09T15:08:36Z
BUG: Index with duplicate labels raises ValueError in Dataframe.query
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c2017c0acc55e..6751227fcdddc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9,6 +9,7 @@ labeling information """ from __future__ import annotations +from pandas.core.indexes.range import RangeIndex import collections from collections import abc @@ -4330,7 +4331,7 @@ def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... - def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: + def query(self,expr: str, *,inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. @@ -4467,16 +4468,34 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No A B C C 0 1 10 10 """ + inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None - res = self.eval(expr, **kwargs) + if self.index.duplicated().any(): + engine='numexpr' + # Create a copy of the dataframe with a unique index to avoid reindexing errors + unique_index = RangeIndex(len(self.index)) + df_copy = self.copy() + df_copy.index = unique_index + + # Filter the copied dataframe + filtered_df = df_copy.query(expr, engine=engine) + + # Map the filtered index back to the original index labels + index_mapping = dict(zip(unique_index, self.index)) + filtered_df.index = filtered_df.index.map(index_mapping) + + return filtered_df + + res = self.eval(expr, **kwargs) try: result = self.loc[res] + except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query
- [x] closes #51815 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52224
2023-03-26T16:14:27Z
2023-05-03T00:16:14Z
null
2023-05-03T00:16:14Z
BUG: __from_arrow__ doesn't accept pyarrow null arrays for numeric ma…
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index a037e50593737..fd19c84f8ab23 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -316,7 +316,9 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ +- Bug where the ``__from_arrow__`` method of masked ExtensionDtypes(e.g. :class:`Float64Dtype`, :class:`BooleanDtype`) would not accept pyarrow arrays of type ``pyarrow.null()`` (:issue:`52223`) - Bug in :meth:`Series.rank` returning wrong order for small values with ``Float64`` dtype (:issue:`52471`) +- Styler ^^^^^^ diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py index 6e6ef6a2c20a8..2a053fac2985c 100644 --- a/pandas/core/arrays/arrow/_arrow_utils.py +++ b/pandas/core/arrays/arrow/_arrow_utils.py @@ -42,6 +42,11 @@ def pyarrow_array_to_numpy_and_mask( """ dtype = np.dtype(dtype) + if pyarrow.types.is_null(arr.type): + # No initialization of data is needed since everything is null + data = np.empty(len(arr), dtype=dtype) + mask = np.zeros(len(arr), dtype=bool) + return data, mask buflist = arr.buffers() # Since Arrow buffers might contain padding and the data might be offset, # the buffer gets sliced here before handing it to numpy. 
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 54bd4220bc060..f6bc8a87a4c60 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -108,14 +108,22 @@ def __from_arrow__( """ import pyarrow - if array.type != pyarrow.bool_(): + if array.type != pyarrow.bool_() and not pyarrow.types.is_null(array.type): raise TypeError(f"Expected array of boolean type, got {array.type} instead") if isinstance(array, pyarrow.Array): chunks = [array] + length = len(array) else: # pyarrow.ChunkedArray chunks = array.chunks + length = array.length() + + if pyarrow.types.is_null(array.type): + mask = np.ones(length, dtype=bool) + # No need to init data, since all null + data = np.empty(length, dtype=bool) + return BooleanArray(data, mask) results = [] for arr in chunks: diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py index 8d629b88edd26..344946ad68d32 100644 --- a/pandas/core/arrays/numeric.py +++ b/pandas/core/arrays/numeric.py @@ -76,7 +76,9 @@ def __from_arrow__( array_class = self.construct_array_type() pyarrow_type = pyarrow.from_numpy_dtype(self.type) - if not array.type.equals(pyarrow_type): + if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null( + array.type + ): # test_from_arrow_type_error raise for string, but allow # through itemsize conversion GH#31896 rt_dtype = pandas_dtype(array.type.to_pandas_dtype()) diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py index 6b0081321ef22..fc2094bd9f4a8 100644 --- a/pandas/tests/arrays/masked/test_arrow_compat.py +++ b/pandas/tests/arrays/masked/test_arrow_compat.py @@ -184,6 +184,15 @@ def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays): tm.assert_numpy_array_equal(mask, mask_expected_empty) +@pytest.mark.parametrize( + "arr", [pa.nulls(10), pa.chunked_array([pa.nulls(4), pa.nulls(6)])] +) +def test_from_arrow_null(data, arr): + res = data.dtype.__from_arrow__(arr) + 
assert res.isna().all() + assert len(res) == 10 + + def test_from_arrow_type_error(data): # ensure that __from_arrow__ returns a TypeError when getting a wrong # array type
…sked types - [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52223
2023-03-26T15:33:28Z
2023-04-07T21:05:08Z
2023-04-07T21:05:08Z
2023-05-24T15:31:25Z
DOC: Clarifies the description of if_sheet_exists in pd.ExcelWriter (#52189)
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 3c1ecffe21353..8c3bbb7798f68 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -942,8 +942,8 @@ class ExcelWriter(metaclass=abc.ABCMeta): * error: raise a ValueError. * new: Create a new sheet, with a name determined by the engine. * replace: Delete the contents of the sheet before writing to it. - * overlay: Write contents to the existing sheet without removing the old - contents. + * overlay: Write contents to the existing sheet without first removing, + but possibly over top of, the existing contents. .. versionadded:: 1.3.0
- [ ] closes #52189 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52222
2023-03-26T13:26:13Z
2023-03-27T18:26:51Z
2023-03-27T18:26:51Z
2023-03-27T18:27:03Z
BUG: Timestamp fails when fold is passed with positional args
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index bac567b537edc..4486d9217bfae 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -154,6 +154,7 @@ Datetimelike - Bug in :meth:`Timestamp.round` with values close to the implementation bounds returning incorrect results instead of raising ``OutOfBoundsDatetime`` (:issue:`51494`) - :meth:`arrays.DatetimeArray.map` can now take a ``na_action`` argument. :meth:`DatetimeIndex.map` with ``na_action="ignore"`` now works as expected. (:issue:`51644`) - Bug in :meth:`arrays.DatetimeArray.map` and :meth:`DatetimeIndex.map`, where the supplied callable operated array-wise instead of element-wise (:issue:`51977`) +- Bug in :class:`Timestamp` raising an error when passing fold when constructing from positional arguments. - Timedelta diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 10a331f302cc4..6307323d42f2f 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1269,7 +1269,23 @@ cdef class _Timestamp(ABCTimestamp): # Python front end to C extension type _Timestamp # This serves as the box for datetime64 +def _fix_positional_arguments(cls): + original_new = cls.__new__ + + def updated_new(cls, *args, **kwargs): + # GH#52117 If we passed positional args, then _ts_input + # is now set to year and the other positional args + # are shifted to the left. Let's shift back + if len(args) > 2 and all(isinstance(arg, int) for arg in args[:3]): + return original_new(cls, _no_input, *args, **kwargs) + else: + return original_new(cls, *args, **kwargs) + + cls.__new__ = updated_new + return cls + +@_fix_positional_arguments class Timestamp(_Timestamp): """ Pandas replacement for python datetime.datetime object. 
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index 02244c1686cab..8c88ceac7d8c9 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -340,12 +340,7 @@ def test_constructor_positional_with_tzinfo(self): @pytest.mark.parametrize("kwd", ["nanosecond", "microsecond", "second", "minute"]) def test_constructor_positional_keyword_mixed_with_tzinfo(self, kwd, request): - # TODO: if we passed microsecond with a keyword we would mess up - # xref GH#45307 - if kwd != "nanosecond": - # nanosecond is keyword-only as of 2.0, others are not - mark = pytest.mark.xfail(reason="GH#45307") - request.node.add_marker(mark) + # GH#52221 makes a mix of positional and keyword arguments behave consistently kwargs = {kwd: 4} ts = Timestamp(2020, 12, 31, tzinfo=timezone.utc, **kwargs) @@ -899,3 +894,21 @@ def test_timestamp_constructor_adjust_value_for_fold(tz, ts_input, fold, value_o result = ts._value expected = value_out assert result == expected + + +@pytest.mark.parametrize("tz", ["dateutil/Europe/London"]) +def test_timestamp_constructor_positional_with_fold(tz): + # Check that we build an object successfully + # if we pass positional arguments and fold + ts = Timestamp(2019, 10, 27, 1, 30, tz=tz, fold=0) + result = ts._value + expected = 1572136200000000 + assert result == expected + + +def test_timestamp_constructor_arg_shift(): + # Check that passing a positional argument as keyword + # does not change the value + result = Timestamp(2019, 10, 27, minute=30) + expected = Timestamp(2019, 10, 27, 0, 30) + assert result == expected
- [ ] closes #52117 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52221
2023-03-26T11:40:03Z
2023-03-31T19:06:42Z
null
2023-03-31T19:06:42Z
BUG: zero-pad shorter years in `Timestamp.isoformat`
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 10a331f302cc4..02f1e43eda62d 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1015,7 +1015,7 @@ cdef class _Timestamp(ABCTimestamp): base_ts = "microseconds" if timespec == "nanoseconds" else timespec base = super(_Timestamp, self).isoformat(sep=sep, timespec=base_ts) # We need to replace the fake year 1970 with our real year - base = f"{self.year}-" + base.split("-", 1)[1] + base = f"{self.year:04d}-" + base.split("-", 1)[1] if self.nanosecond == 0 and timespec != "nanoseconds": return base diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index 7e4002dc3a0cf..2d504d10addd4 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -599,21 +599,13 @@ def test_bounds_with_different_units(self): @pytest.mark.parametrize("arg", ["001-01-01", "0001-01-01"]) def test_out_of_bounds_string_consistency(self, arg): # GH 15829 - msg = "|".join( - [ - "Cannot cast 1-01-01 00:00:00 to unit='ns' without overflow", - "Out of bounds nanosecond timestamp: 1-01-01 00:00:00", - ] - ) + msg = "Cannot cast 0001-01-01 00:00:00 to unit='ns' without overflow" with pytest.raises(OutOfBoundsDatetime, match=msg): Timestamp(arg).as_unit("ns") - if arg == "0001-01-01": - # only the 4-digit year goes through ISO path which gets second reso - # instead of ns reso - ts = Timestamp(arg) - assert ts.unit == "s" - assert ts.year == ts.month == ts.day == 1 + ts = Timestamp(arg) + assert ts.unit == "s" + assert ts.year == ts.month == ts.day == 1 def test_min_valid(self): # Ensure that Timestamp.min is a valid Timestamp diff --git a/pandas/tests/scalar/timestamp/test_formats.py b/pandas/tests/scalar/timestamp/test_formats.py index 71dbf3539bdb2..0c154963d3726 100644 --- a/pandas/tests/scalar/timestamp/test_formats.py +++ 
b/pandas/tests/scalar/timestamp/test_formats.py @@ -11,6 +11,15 @@ second=8, microsecond=132263, ) +ts_no_ns_year1 = Timestamp( + year=1, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, +) ts_ns = Timestamp( year=2019, month=5, @@ -50,6 +59,8 @@ (ts_no_ns, "auto", "2019-05-18T15:17:08.132263"), (ts_no_ns, "seconds", "2019-05-18T15:17:08"), (ts_no_ns, "nanoseconds", "2019-05-18T15:17:08.132263000"), + (ts_no_ns_year1, "seconds", "0001-05-18T15:17:08"), + (ts_no_ns_year1, "nanoseconds", "0001-05-18T15:17:08.132263000"), (ts_ns, "auto", "2019-05-18T15:17:08.132263123"), (ts_ns, "hours", "2019-05-18T15"), (ts_ns, "minutes", "2019-05-18T15:17"),
As discussed in #50867, this changes `Timestamp.isoformat` to zero-pad years to 4 digits. After applying the change, I also had to change a existing test because of the now-changed standard format, but also remove a condition which assumed that `"001-01-01"` and `"0001-01-01"` would result in different objects (not sure, maybe there was a time where the resulting `Timestamp` object would not be equal?). However, on current `main`: ```python a = pd.Timestamp("001-01-01") b = pd.Timestamp("0001-01-01") a == b and a.unit == b.unit ``` cc @spencerkclark No `whatsnew` entry yet because I was not sure where to put it. - [x] closes #50867 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] ~Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.~ - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52220
2023-03-26T10:48:33Z
2023-03-31T16:51:29Z
2023-03-31T16:51:29Z
2023-03-31T18:38:35Z
ENH: make DataFrame.applymap uses the .map method of ExtensionArrays
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 71fda39a05e55..1f8c93978c890 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -36,6 +36,7 @@ Other enhancements - :class:`api.extensions.ExtensionArray` now has a :meth:`~api.extensions.ExtensionArray.map` method (:issue:`51809`) - Improve error message when having incompatible columns using :meth:`DataFrame.merge` (:issue:`51861`) - Improved error message when creating a DataFrame with empty data (0 rows), no index and an incorrect number of columns. (:issue:`52084`) +- :meth:`DataFrame.applymap` now uses the :meth:`~api.extensions.ExtensionArray.map` method of underlying :class:`api.extensions.ExtensionArray` instances (:issue:`52219`) - :meth:`arrays.SparseArray.map` now supports ``na_action`` (:issue:`52096`). .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bef7022a7d10f..09d3f60cb9e66 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9955,14 +9955,14 @@ def applymap( raise ValueError( f"na_action must be 'ignore' or None. 
Got {repr(na_action)}" ) - ignore_na = na_action == "ignore" + + if self.empty: + return self.copy() + func = functools.partial(func, **kwargs) - # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): - if x.empty: - return lib.map_infer(x, func, ignore_na=ignore_na) - return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na) + return x._map_values(func, na_action=na_action) return self.apply(infer).__finalize__(self, "applymap") diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 6ed3f6140d361..c7eb8c0332e84 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -546,6 +546,29 @@ def test_applymap_float_object_conversion(val): assert result == object +@pytest.mark.parametrize("na_action", [None, "ignore"]) +def test_applymap_keeps_dtype(na_action): + # GH52219 + arr = Series(["a", np.nan, "b"]) + sparse_arr = arr.astype(pd.SparseDtype(object)) + df = DataFrame(data={"a": arr, "b": sparse_arr}) + + def func(x): + return str.upper(x) if not pd.isna(x) else x + + result = df.applymap(func, na_action=na_action) + + expected_sparse = pd.array(["A", np.nan, "B"], dtype=pd.SparseDtype(object)) + expected_arr = expected_sparse.astype(object) + expected = DataFrame({"a": expected_arr, "b": expected_sparse}) + + tm.assert_frame_equal(result, expected) + + result_empty = df.iloc[:0, :].applymap(func, na_action=na_action) + expected_empty = expected.iloc[:0, :] + tm.assert_frame_equal(result_empty, expected_empty) + + def test_applymap_str(): # GH 2786 df = DataFrame(np.random.random((3, 4)))
Currently `DataFrame.applymap` ignores the `.map`method of `ExtensionArrays`. This fixes that. Example: ```python >>> import pandas as pd >>> >>> arr = pd.array(["a", np.nan, "b"], dtype=object) >>> sparse_arr = pd.array(arr.tolist(), dtype=pd.SparseDtype(object)) >>> df = pd.DataFrame(data={'a': arr, "b": sparse_arr}) >>> df.applymap(str.upper, na_action="ignore").dtypes # main a object b object dtype: object >>> df.applymap(str.upper, na_action="ignore").dtypes # this PR a object b Sparse[object, nan] dtype: object ```
https://api.github.com/repos/pandas-dev/pandas/pulls/52219
2023-03-26T09:43:04Z
2023-03-27T18:41:34Z
2023-03-27T18:41:34Z
2023-03-27T18:57:52Z
datetime64 series and dataframe test GH#28273
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 6b635a4f46972..d120a066adfc4 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -390,6 +390,11 @@ def test_encode_time_conversion_dateutil(self): def test_encode_as_null(self, decoded_input): assert ujson.encode(decoded_input) == "null", "Expected null" + def test_encode_datetime64(self): + val = np.datetime64("2000-01-01") + assert ujson.encode(val) == str(-6858695778871) + assert ujson.decode(ujson.encode(val)) == -6858695778871 + def test_datetime_units(self): val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504) stamp = Timestamp(val).as_unit("ns")
- [ ] closes #28273 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). I used main codes to verify the GH28273. Unlike the original codes, the series and data frame types are same. So my codes just test its current behavior.
https://api.github.com/repos/pandas-dev/pandas/pulls/52217
2023-03-26T07:20:54Z
2023-03-28T18:01:02Z
null
2023-03-28T23:22:27Z
BUG: assert_frame_equal still checks category dtypes even when asked not to check index type
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 1f5c3c88c5ff5..7c3e43815d0c8 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -259,6 +259,7 @@ Other ^^^^^ - Bug in :func:`assert_almost_equal` now throwing assertion error for two unequal sets (:issue:`51727`) - Bug in :meth:`Series.memory_usage` when ``deep=True`` throw an error with Series of objects and the returned value is incorrect, as it does not take into account GC corrections (:issue:`51858`) +- Bug in :func:`assert_frame_equal` checks category dtypes even when asked not to check index type (:issue:`52126`) .. ***DO NOT USE THIS SECTION*** diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index e25e8388bc4cd..3d46d0864b91f 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -294,6 +294,7 @@ def _get_ilevel_values(index, level): exact=exact, check_names=check_names, check_exact=check_exact, + check_categorical=check_categorical, rtol=rtol, atol=atol, obj=lobj, diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py index f7d41ed536a40..a48eeb5be8005 100644 --- a/pandas/tests/util/test_assert_index_equal.py +++ b/pandas/tests/util/test_assert_index_equal.py @@ -298,3 +298,17 @@ def test_assert_ea_index_equal_non_matching_na(check_names, check_categorical): tm.assert_index_equal( idx1, idx2, check_names=check_names, check_categorical=check_categorical ) + + +@pytest.mark.parametrize("check_categorical", [True, False]) +def test_assert_multi_index_dtype_check_categorical(check_categorical): + # GH#52126 + idx1 = MultiIndex.from_arrays([Categorical(np.array([1, 2], dtype=np.uint64))]) + idx2 = MultiIndex.from_arrays([Categorical(np.array([1, 2], dtype=np.int64))]) + if check_categorical: + with pytest.raises( + AssertionError, match=r"^MultiIndex level \[0\] are different" + ): + tm.assert_index_equal(idx1, idx2, 
check_categorical=check_categorical) + else: + tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)
- [x] closes #52126 - [ ] [Tests added and passed] - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52216
2023-03-26T07:10:14Z
2023-04-01T17:51:29Z
2023-04-01T17:51:29Z
2023-04-01T17:51:39Z
Backport PR #52209 on branch 2.0.x (DOC: getting_started tutorials nbviewer broken link structure fixed)
diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst index bff50bb1e4c2d..1220c915c3cbc 100644 --- a/doc/source/getting_started/tutorials.rst +++ b/doc/source/getting_started/tutorials.rst @@ -113,7 +113,7 @@ Various tutorials * `Wes McKinney's (pandas BDFL) blog <https://wesmckinney.com/archives.html>`_ * `Statistical analysis made easy in Python with SciPy and pandas DataFrames, by Randal Olson <http://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/>`_ * `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 <https://conference.scipy.org/scipy2013/tutorial_detail.php?id=109>`_ -* `Financial analysis in Python, by Thomas Wiecki <https://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_ +* `Financial analysis in Python, by Thomas Wiecki <https://nbviewer.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_ * `Intro to pandas data structures, by Greg Reda <http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/>`_ * `Pandas and Python: Top 10, by Manish Amde <https://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/>`_ * `Pandas DataFrames Tutorial, by Karlijn Willems <https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python>`_
Backport PR #52209: DOC: getting_started tutorials nbviewer broken link structure fixed
https://api.github.com/repos/pandas-dev/pandas/pulls/52215
2023-03-26T03:49:04Z
2023-03-26T15:22:58Z
2023-03-26T15:22:58Z
2023-03-26T15:22:58Z
ENH: Adding engine_kwargs to Excel engines for issue #40274
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 1002eb9ee8568..101932a23ca6a 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -3449,6 +3449,18 @@ Reading Excel files In the most basic use-case, ``read_excel`` takes a path to an Excel file, and the ``sheet_name`` indicating which sheet to parse. +When using the ``engine_kwargs`` parameter, pandas will pass these arguments to the +engine. For this, it is important to know which function pandas is +using internally. + +* For the engine openpyxl, pandas is using :func:`openpyxl.load_workbook` to read in (``.xlsx``) and (``.xlsm``) files. + +* For the engine xlrd, pandas is using :func:`xlrd.open_workbook` to read in (``.xls``) files. + +* For the engine pyxlsb, pandas is using :func:`pyxlsb.open_workbook` to read in (``.xlsb``) files. + +* For the engine odf, pandas is using :func:`odf.opendocument.load` to read in (``.ods``) files. + .. code-block:: python # Returns a DataFrame diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index afe361da1114d..245cc111f3794 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -87,6 +87,7 @@ Other enhancements - :meth:`DataFrame.applymap` now uses the :meth:`~api.extensions.ExtensionArray.map` method of underlying :class:`api.extensions.ExtensionArray` instances (:issue:`52219`) - :meth:`arrays.SparseArray.map` now supports ``na_action`` (:issue:`52096`). - Add dtype of categories to ``repr`` information of :class:`CategoricalDtype` (:issue:`52179`) +- Adding ``engine_kwargs`` parameter to :meth:`DataFrame.read_excel` (:issue:`52214`) - .. --------------------------------------------------------------------------- diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 8c3bbb7798f68..92750bdd0f272 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -289,6 +289,9 @@ .. 
versionadded:: 2.0 +engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + Returns ------- DataFrame or dict of DataFrames @@ -302,6 +305,11 @@ read_csv : Read a comma-separated values (csv) file into DataFrame. read_fwf : Read a table of fixed-width formatted lines into DataFrame. +Notes +----- +For specific information on the methods used for each Excel engine, refer to the pandas +:ref:`user guide <io.excel_reader>` + Examples -------- The file can be read using the file name as string or an open file object: @@ -472,13 +480,21 @@ def read_excel( skipfooter: int = 0, storage_options: StorageOptions = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + engine_kwargs: dict | None = None, ) -> DataFrame | dict[IntStrT, DataFrame]: check_dtype_backend(dtype_backend) - should_close = False + if engine_kwargs is None: + engine_kwargs = {} + if not isinstance(io, ExcelFile): should_close = True - io = ExcelFile(io, storage_options=storage_options, engine=engine) + io = ExcelFile( + io, + storage_options=storage_options, + engine=engine, + engine_kwargs=engine_kwargs, + ) elif engine and engine != io.engine: raise ValueError( "Engine should not be specified when passing " @@ -520,8 +536,14 @@ def read_excel( class BaseExcelReader(metaclass=abc.ABCMeta): def __init__( - self, filepath_or_buffer, storage_options: StorageOptions = None + self, + filepath_or_buffer, + storage_options: StorageOptions = None, + engine_kwargs: dict | None = None, ) -> None: + if engine_kwargs is None: + engine_kwargs = {} + # First argument can also be bytes, so create a buffer if isinstance(filepath_or_buffer, bytes): filepath_or_buffer = BytesIO(filepath_or_buffer) @@ -540,7 +562,7 @@ def __init__( # N.B. 
xlrd.Book has a read attribute too self.handles.handle.seek(0) try: - self.book = self.load_workbook(self.handles.handle) + self.book = self.load_workbook(self.handles.handle, engine_kwargs) except Exception: self.close() raise @@ -555,7 +577,7 @@ def _workbook_class(self): pass @abc.abstractmethod - def load_workbook(self, filepath_or_buffer): + def load_workbook(self, filepath_or_buffer, engine_kwargs): pass def close(self) -> None: @@ -1450,6 +1472,8 @@ class ExcelFile: Please do not report issues when using ``xlrd`` to read ``.xlsx`` files. This is not supported, switch to using ``openpyxl`` instead. + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. """ from pandas.io.excel._odfreader import ODFReader @@ -1469,7 +1493,11 @@ def __init__( path_or_buffer, engine: str | None = None, storage_options: StorageOptions = None, + engine_kwargs: dict | None = None, ) -> None: + if engine_kwargs is None: + engine_kwargs = {} + if engine is not None and engine not in self._engines: raise ValueError(f"Unknown engine: {engine}") @@ -1513,7 +1541,11 @@ def __init__( self.engine = engine self.storage_options = storage_options - self._reader = self._engines[engine](self._io, storage_options=storage_options) + self._reader = self._engines[engine]( + self._io, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) def __fspath__(self): return self._io diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index c3d7cb5df717f..c46424d5b26da 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -31,6 +31,7 @@ def __init__( self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, + engine_kwargs: dict | None = None, ) -> None: """ Read tables out of OpenDocument formatted files. @@ -40,9 +41,15 @@ def __init__( filepath_or_buffer : str, path to be parsed or an open readable stream. 
{storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. """ import_optional_dependency("odf") - super().__init__(filepath_or_buffer, storage_options=storage_options) + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) @property def _workbook_class(self): @@ -50,10 +57,12 @@ def _workbook_class(self): return OpenDocument - def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]): + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ): from odf.opendocument import load - return load(filepath_or_buffer) + return load(filepath_or_buffer, **engine_kwargs) @property def empty_value(self) -> str: diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index e751c919ee8dc..195d3a3a8b263 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -536,6 +536,7 @@ def __init__( self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, + engine_kwargs: dict | None = None, ) -> None: """ Reader using openpyxl engine. @@ -545,9 +546,15 @@ def __init__( filepath_or_buffer : str, path object or Workbook Object to be parsed. {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
""" import_optional_dependency("openpyxl") - super().__init__(filepath_or_buffer, storage_options=storage_options) + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) @property def _workbook_class(self): @@ -555,11 +562,17 @@ def _workbook_class(self): return Workbook - def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]): + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ): from openpyxl import load_workbook return load_workbook( - filepath_or_buffer, read_only=True, data_only=True, keep_links=False + filepath_or_buffer, + read_only=True, + data_only=True, + keep_links=False, + **engine_kwargs, ) @property diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index bfe21082cc4d0..a1234b0e74c3e 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -25,6 +25,7 @@ def __init__( self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, + engine_kwargs: dict | None = None, ) -> None: """ Reader using pyxlsb engine. @@ -34,11 +35,17 @@ def __init__( filepath_or_buffer : str, path object, or Workbook Object to be parsed. {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
""" import_optional_dependency("pyxlsb") # This will call load_workbook on the filepath or buffer # And set the result to the book-attribute - super().__init__(filepath_or_buffer, storage_options=storage_options) + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) @property def _workbook_class(self): @@ -46,14 +53,16 @@ def _workbook_class(self): return Workbook - def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]): + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ): from pyxlsb import open_workbook # TODO: hack in buffer capability # This might need some modifications to the Pyxlsb library # Actual work for opening it is in xlsbpackage.py, line 20-ish - return open_workbook(filepath_or_buffer) + return open_workbook(filepath_or_buffer, **engine_kwargs) @property def sheet_names(self) -> list[str]: diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index 702d00e7fdea7..d131567cf70f7 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -22,7 +22,10 @@ class XlrdReader(BaseExcelReader): @doc(storage_options=_shared_docs["storage_options"]) def __init__( - self, filepath_or_buffer, storage_options: StorageOptions = None + self, + filepath_or_buffer, + storage_options: StorageOptions = None, + engine_kwargs: dict | None = None, ) -> None: """ Reader using xlrd engine. @@ -32,10 +35,16 @@ def __init__( filepath_or_buffer : str, path object or Workbook Object to be parsed. {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
""" err_msg = "Install xlrd >= 2.0.1 for xls Excel support" import_optional_dependency("xlrd", extra=err_msg) - super().__init__(filepath_or_buffer, storage_options=storage_options) + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) @property def _workbook_class(self): @@ -43,14 +52,14 @@ def _workbook_class(self): return Book - def load_workbook(self, filepath_or_buffer): + def load_workbook(self, filepath_or_buffer, engine_kwargs): from xlrd import open_workbook if hasattr(filepath_or_buffer, "read"): data = filepath_or_buffer.read() - return open_workbook(file_contents=data) + return open_workbook(file_contents=data, **engine_kwargs) else: - return open_workbook(filepath_or_buffer) + return open_workbook(filepath_or_buffer, **engine_kwargs) @property def sheet_names(self): diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index c22051912d293..05c86be850b32 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -6,6 +6,7 @@ import os from pathlib import Path import platform +import re from urllib.error import URLError from zipfile import BadZipFile @@ -148,6 +149,32 @@ def parser(self, *args, **kwargs): expected = expected_defaults[read_ext[1:]] assert result == expected + def test_engine_kwargs(self, read_ext, engine): + # GH#52214 + expected_defaults = { + "xlsx": {"foo": "abcd"}, + "xlsm": {"foo": 123}, + "xlsb": {"foo": "True"}, + "xls": {"foo": True}, + "ods": {"foo": "abcd"}, + } + + if read_ext[1:] == "xls" or read_ext[1:] == "xlsb": + msg = re.escape(r"open_workbook() got an unexpected keyword argument 'foo'") + elif read_ext[1:] == "ods": + msg = re.escape(r"load() got an unexpected keyword argument 'foo'") + else: + msg = re.escape(r"load_workbook() got an unexpected keyword argument 'foo'") + + if engine is not None: + with pytest.raises(TypeError, match=msg): + pd.read_excel( + "test1" + read_ext, + 
sheet_name="Sheet1", + index_col=0, + engine_kwargs=expected_defaults[read_ext[1:]], + ) + def test_usecols_int(self, read_ext): # usecols as int msg = "Passing an integer for `usecols`"
- [X] closes #40274 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52214
2023-03-26T02:02:06Z
2023-04-12T15:52:03Z
2023-04-12T15:52:03Z
2023-08-30T12:35:13Z
PERF: dtype checks
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 19a121253e29a..ae1d20ca4e225 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -10,7 +10,6 @@ import sys import time import warnings -from pandas.errors import ParserError from pandas.util._exceptions import find_stack_level from pandas import StringDtype @@ -106,15 +105,10 @@ from pandas.errors import ( ParserWarning, ) -from pandas.core.dtypes.common import ( - is_bool_dtype, - is_datetime64_dtype, - is_extension_array_dtype, - is_float_dtype, - is_integer_dtype, - is_object_dtype, +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, ) -from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.inference import is_dict_like cdef: @@ -1077,7 +1071,7 @@ cdef class TextReader: # don't try to upcast EAs if ( - na_count > 0 and not is_extension_array_dtype(col_dtype) + na_count > 0 and not isinstance(col_dtype, ExtensionDtype) or self.dtype_backend != "numpy" ): use_dtype_backend = self.dtype_backend != "numpy" and col_dtype is None @@ -1142,14 +1136,14 @@ cdef class TextReader: # (see _try_bool_flex()). Usually this would be taken care of using # _maybe_upcast(), but if col_dtype is a floating type we should just # take care of that cast here. 
- if col_res.dtype == np.bool_ and is_float_dtype(col_dtype): + if col_res.dtype == np.bool_ and col_dtype.kind == "f": mask = col_res.view(np.uint8) == na_values[np.uint8] col_res = col_res.astype(col_dtype) np.putmask(col_res, mask, np.nan) return col_res, na_count # NaNs are already cast to True here, so can not use astype - if col_res.dtype == np.bool_ and is_integer_dtype(col_dtype): + if col_res.dtype == np.bool_ and col_dtype.kind in "iu": if na_count > 0: raise ValueError( f"cannot safely convert passed user dtype of " @@ -1193,14 +1187,14 @@ cdef class TextReader: cats, codes, dtype, true_values=true_values) return cat, na_count - elif is_extension_array_dtype(dtype): + elif isinstance(dtype, ExtensionDtype): result, na_count = self._string_convert(i, start, end, na_filter, na_hashset) array_type = dtype.construct_array_type() try: # use _from_sequence_of_strings if the class defines it - if is_bool_dtype(dtype): + if dtype.kind == "b": true_values = [x.decode() for x in self.true_values] false_values = [x.decode() for x in self.false_values] result = array_type._from_sequence_of_strings( @@ -1216,7 +1210,7 @@ cdef class TextReader: return result, na_count - elif is_integer_dtype(dtype): + elif dtype.kind in "iu": try: result, na_count = _try_int64(self.parser, i, start, end, na_filter, na_hashset) @@ -1233,14 +1227,14 @@ cdef class TextReader: return result, na_count - elif is_float_dtype(dtype): + elif dtype.kind == "f": result, na_count = _try_double(self.parser, i, start, end, na_filter, na_hashset, na_flist) if result is not None and dtype != "float64": result = result.astype(dtype) return result, na_count - elif is_bool_dtype(dtype): + elif dtype.kind == "b": result, na_count = _try_bool_flex(self.parser, i, start, end, na_filter, na_hashset, self.true_set, self.false_set) @@ -1267,10 +1261,10 @@ cdef class TextReader: # unicode variable width return self._string_convert(i, start, end, na_filter, na_hashset) - elif is_object_dtype(dtype): + elif 
dtype == object: return self._string_convert(i, start, end, na_filter, na_hashset) - elif is_datetime64_dtype(dtype): + elif dtype.kind == "M": raise TypeError(f"the dtype {dtype} is not supported " f"for parsing, pass this column " f"using parse_dates instead") @@ -1438,7 +1432,7 @@ def _maybe_upcast( ------- The casted array. """ - if is_extension_array_dtype(arr.dtype): + if isinstance(arr.dtype, ExtensionDtype): # TODO: the docstring says arr is an ndarray, in which case this cannot # be reached. Is that incorrect? return arr diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index e25e8388bc4cd..2c0d75bcf2250 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -13,10 +13,8 @@ from pandas.core.dtypes.common import ( is_bool, - is_categorical_dtype, is_extension_array_dtype, is_integer_dtype, - is_interval_dtype, is_number, is_numeric_dtype, needs_i8_conversion, @@ -33,6 +31,7 @@ DataFrame, DatetimeIndex, Index, + IntervalDtype, IntervalIndex, MultiIndex, PeriodIndex, @@ -238,7 +237,9 @@ def _check_types(left, right, obj: str = "Index") -> None: assert_attr_equal("inferred_type", left, right, obj=obj) # Skip exact dtype checking when `check_categorical` is False - if is_categorical_dtype(left.dtype) and is_categorical_dtype(right.dtype): + if isinstance(left.dtype, CategoricalDtype) and isinstance( + right.dtype, CategoricalDtype + ): if check_categorical: assert_attr_equal("dtype", left, right, obj=obj) assert_index_equal(left.categories, right.categories, exact=exact) @@ -335,7 +336,9 @@ def _get_ilevel_values(index, level): assert_interval_array_equal(left._values, right._values) if check_categorical: - if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype): + if isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): assert_categorical_equal(left._values, right._values, obj=f"{obj} category") @@ -946,7 +949,9 @@ def assert_series_equal( f"is not equal to 
{right._values}." ) raise AssertionError(msg) - elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype): + elif isinstance(left.dtype, IntervalDtype) and isinstance( + right.dtype, IntervalDtype + ): assert_interval_array_equal(left.array, right.array) elif isinstance(left.dtype, CategoricalDtype) or isinstance( right.dtype, CategoricalDtype diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index db45f140c268e..5cddf3c2c865b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -42,7 +42,6 @@ ensure_platform_int, is_array_like, is_bool_dtype, - is_categorical_dtype, is_complex_dtype, is_dict_like, is_extension_array_dtype, @@ -59,6 +58,7 @@ from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, + CategoricalDtype, ExtensionDtype, PandasDtype, ) @@ -141,7 +141,7 @@ def _ensure_data(values: ArrayLike) -> np.ndarray: return _ensure_data(values._data) return np.asarray(values) - elif is_categorical_dtype(values.dtype): + elif isinstance(values.dtype, CategoricalDtype): # NB: cases that go through here should NOT be using _reconstruct_data # on the back-end. values = cast("Categorical", values) @@ -417,7 +417,7 @@ def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None): """See algorithms.unique for docs. Takes a mask for masked arrays.""" values = _ensure_arraylike(values) - if is_extension_array_dtype(values.dtype): + if isinstance(values.dtype, ExtensionDtype): # Dispatch to extension dtype's unique. 
return values.unique() @@ -1534,7 +1534,7 @@ def safe_sort( ordered: AnyArrayLike if ( - not is_extension_array_dtype(values) + not isinstance(values.dtype, ExtensionDtype) and lib.infer_dtype(values, skipna=False) == "mixed-integer" ): ordered = _sort_mixed(values) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 3eb7159399bb3..334400cc13201 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -83,10 +83,8 @@ from pandas.core.dtypes.common import ( is_all_strings, - is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_float_dtype, @@ -99,8 +97,10 @@ pandas_dtype, ) from pandas.core.dtypes.dtypes import ( + CategoricalDtype, DatetimeTZDtype, ExtensionDtype, + PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCCategorical, @@ -167,7 +167,7 @@ def _period_dispatch(meth: F) -> F: @wraps(meth) def new_meth(self, *args, **kwargs): - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): return meth(self, *args, **kwargs) arr = self.view("M8[ns]") @@ -377,7 +377,7 @@ def _get_getitem_freq(self, key) -> BaseOffset | None: """ Find the `freq` attribute to assign to the result of a __getitem__ lookup. """ - is_period = is_period_dtype(self.dtype) + is_period = isinstance(self.dtype, PeriodDtype) if is_period: freq = self.freq elif self.ndim != 1: @@ -437,7 +437,7 @@ def astype(self, dtype, copy: bool = True): # 3. 
DatetimeArray.astype handles datetime -> period dtype = pandas_dtype(dtype) - if is_object_dtype(dtype): + if dtype == object: if self.dtype.kind == "M": self = cast("DatetimeArray", self) # *much* faster than self._box_values @@ -521,7 +521,7 @@ def _concat_same_type( dtype = obj.dtype new_freq = None - if is_period_dtype(dtype): + if isinstance(dtype, PeriodDtype): new_freq = obj.freq elif axis == 0: # GH 3232: If the concat result is evenly spaced, we can retain the @@ -703,7 +703,7 @@ def _validate_listlike(self, value, allow_object: bool = False): except ValueError: pass - if is_categorical_dtype(value.dtype): + if isinstance(value.dtype, CategoricalDtype): # e.g. we have a Categorical holding self.dtype if is_dtype_equal(value.categories.dtype, self.dtype): # TODO: do we need equal dtype or just comparable? @@ -951,7 +951,7 @@ def _cmp_method(self, other, op): result = np.zeros(self.shape, dtype=bool) return result - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): self = cast(TimelikeOps, self) if self._creso != other._creso: if not isinstance(other, type(self)): @@ -1022,7 +1022,7 @@ def _get_arithmetic_result_freq(self, other) -> BaseOffset | None: """ # Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving # whenever self.freq is a Tick - if is_period_dtype(self.dtype): + if isinstance(self.dtype, PeriodDtype): return self.freq elif not lib.is_scalar(other): return None @@ -1200,7 +1200,7 @@ def _add_nat(self): """ Add pd.NaT to self """ - if is_period_dtype(self.dtype): + if isinstance(self.dtype, PeriodDtype): raise TypeError( f"Cannot add {type(self).__name__} and {type(NaT).__name__}" ) @@ -1237,7 +1237,7 @@ def _sub_nat(self): def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]: # If the operation is well-defined, we return an object-dtype ndarray # of DateOffsets. 
Null entries are filled with pd.NaT - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): raise TypeError( f"cannot subtract {type(other).__name__} from {type(self).__name__}" ) @@ -1327,7 +1327,7 @@ def __add__(self, other): elif lib.is_integer(other): # This check must come after the check for np.timedelta64 # as is_integer returns True for these - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast("PeriodArray", self) result = obj._addsub_int_array_or_scalar(other * obj.freq.n, operator.add) @@ -1339,11 +1339,13 @@ def __add__(self, other): elif is_object_dtype(other_dtype): # e.g. Array/Index of DateOffset objects result = self._addsub_object_array(other, operator.add) - elif is_datetime64_dtype(other_dtype) or is_datetime64tz_dtype(other_dtype): + elif is_datetime64_dtype(other_dtype) or isinstance( + other_dtype, DatetimeTZDtype + ): # DatetimeIndex, ndarray[datetime64] return self._add_datetime_arraylike(other) elif is_integer_dtype(other_dtype): - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast("PeriodArray", self) result = obj._addsub_int_array_or_scalar(other * obj.freq.n, operator.add) @@ -1383,7 +1385,7 @@ def __sub__(self, other): elif lib.is_integer(other): # This check must come after the check for np.timedelta64 # as is_integer returns True for these - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast("PeriodArray", self) result = obj._addsub_int_array_or_scalar(other * obj.freq.n, operator.sub) @@ -1398,14 +1400,16 @@ def __sub__(self, other): elif is_object_dtype(other_dtype): # e.g. 
Array/Index of DateOffset objects result = self._addsub_object_array(other, operator.sub) - elif is_datetime64_dtype(other_dtype) or is_datetime64tz_dtype(other_dtype): + elif is_datetime64_dtype(other_dtype) or isinstance( + other_dtype, DatetimeTZDtype + ): # DatetimeIndex, ndarray[datetime64] result = self._sub_datetime_arraylike(other) elif is_period_dtype(other_dtype): # PeriodIndex result = self._sub_periodlike(other) elif is_integer_dtype(other_dtype): - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast("PeriodArray", self) result = obj._addsub_int_array_or_scalar(other * obj.freq.n, operator.sub) @@ -1444,7 +1448,7 @@ def __rsub__(self, other): raise TypeError( f"cannot subtract {type(self).__name__} from {type(other).__name__}" ) - elif is_period_dtype(self.dtype) and is_timedelta64_dtype(other_dtype): + elif isinstance(self.dtype, PeriodDtype) and is_timedelta64_dtype(other_dtype): # TODO: Can we simplify/generalize these cases at all? raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}") elif is_timedelta64_dtype(self.dtype): @@ -1458,7 +1462,7 @@ def __iadd__(self, other) -> Self: result = self + other self[:] = result[:] - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): # restore freq, which is invalidated by setitem self._freq = result.freq return self @@ -1467,7 +1471,7 @@ def __isub__(self, other) -> Self: result = self - other self[:] = result[:] - if not is_period_dtype(self.dtype): + if not isinstance(self.dtype, PeriodDtype): # restore freq, which is invalidated by setitem self._freq = result.freq return self @@ -1543,7 +1547,7 @@ def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0): ----- mean is only defined for Datetime and Timedelta dtypes, not for Period. 
""" - if is_period_dtype(self.dtype): + if isinstance(self.dtype, PeriodDtype): # See discussion in GH#24757 raise TypeError( f"mean is not implemented for {type(self).__name__} since the " @@ -1987,7 +1991,7 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): def _round(self, freq, mode, ambiguous, nonexistent): # round the local times - if is_datetime64tz_dtype(self.dtype): + if isinstance(self.dtype, DatetimeTZDtype): # operate on naive timestamps, then convert back to aware self = cast("DatetimeArray", self) naive = self.tz_localize(None) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 296e8e0784e38..c80ab32db1ea6 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -52,12 +52,9 @@ is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, is_dtype_equal, - is_extension_array_dtype, is_float_dtype, is_object_dtype, - is_period_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, @@ -66,6 +63,7 @@ from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, + PeriodDtype, ) from pandas.core.dtypes.missing import isna @@ -697,7 +695,7 @@ def astype(self, dtype, copy: bool = True): "Pass e.g. 'datetime64[ns]' instead." ) - elif is_period_dtype(dtype): + elif isinstance(dtype, PeriodDtype): return self.to_period(freq=dtype.freq) return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy) @@ -734,7 +732,7 @@ def _assert_tzawareness_compat(self, other) -> None: other_tz = getattr(other, "tzinfo", None) other_dtype = getattr(other, "dtype", None) - if is_datetime64tz_dtype(other_dtype): + if isinstance(other_dtype, DatetimeTZDtype): # Get tzinfo from Series dtype other_tz = other.dtype.tz if other is NaT: @@ -2075,7 +2073,7 @@ def _sequence_to_dt64ns( # `data` may have originally been a Categorical[datetime64[ns, tz]], # so we need to handle these types. 
- if is_datetime64tz_dtype(data_dtype): + if isinstance(data_dtype, DatetimeTZDtype): # DatetimeArray -> ndarray tz = _maybe_infer_tz(tz, data.tz) result = data._ndarray @@ -2242,14 +2240,16 @@ def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None): elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype): # GH#29794 enforcing deprecation introduced in GH#23539 raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]") - elif is_period_dtype(data.dtype): + elif isinstance(data.dtype, PeriodDtype): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError( "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead" ) - elif is_extension_array_dtype(data.dtype) and not is_datetime64tz_dtype(data.dtype): + elif isinstance(data.dtype, ExtensionDtype) and not isinstance( + data.dtype, DatetimeTZDtype + ): # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 99c0553998d63..3978e5bf13fbe 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -26,7 +26,6 @@ TD64NS_DTYPE, ensure_object, is_bool_dtype, - is_categorical_dtype, is_complex_dtype, is_dtype_equal, is_extension_array_dtype, @@ -283,7 +282,7 @@ def _isna_array(values: ArrayLike, inf_as_na: bool = False): if not isinstance(values, np.ndarray): # i.e. 
ExtensionArray - if inf_as_na and is_categorical_dtype(dtype): + if inf_as_na and isinstance(dtype, CategoricalDtype): result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na) else: # error: Incompatible types in assignment (expression has type diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py index d2f53af8ca1d9..ffd33a39b8d2b 100644 --- a/pandas/core/indexers/utils.py +++ b/pandas/core/indexers/utils.py @@ -13,11 +13,11 @@ from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, - is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, ) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, @@ -531,7 +531,7 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: dtype = indexer.dtype if is_bool_dtype(dtype): - if is_extension_array_dtype(dtype): + if isinstance(dtype, ExtensionDtype): indexer = indexer.to_numpy(dtype=bool, na_value=False) else: indexer = np.asarray(indexer, dtype=bool) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c93eb0fe3def6..934daf7eaf708 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -95,7 +95,6 @@ is_bool_dtype, is_dtype_equal, is_ea_or_datetimelike_dtype, - is_extension_array_dtype, is_float, is_float_dtype, is_hashable, @@ -1376,7 +1375,7 @@ def _format_native_types( """ from pandas.io.formats.format import FloatArrayFormatter - if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): + if is_float_dtype(self.dtype) and not isinstance(self.dtype, ExtensionDtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, @@ -1388,7 +1387,7 @@ def _format_native_types( return formatter.get_result_as_array() mask = isna(self) - if not is_object_dtype(self) and not quoting: + if self.dtype != object and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) @@ -5200,7 
+5199,7 @@ def __getitem__(self, key): # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% - if is_extension_array_dtype(getattr(key, "dtype", None)): + if isinstance(getattr(key, "dtype", None), ExtensionDtype): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) @@ -5409,7 +5408,7 @@ def equals(self, other: Any) -> bool: earr = cast(ExtensionArray, self._data) return earr.equals(other._data) - if is_extension_array_dtype(other.dtype): + if isinstance(other.dtype, ExtensionDtype): # All EA-backed Index subclasses override equals return other.equals(self) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index abe4a00e0b813..dc1c87b4787a8 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -57,8 +57,6 @@ from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, - is_categorical_dtype, - is_extension_array_dtype, is_hashable, is_integer, is_iterator, @@ -67,7 +65,10 @@ is_scalar, pandas_dtype, ) -from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, @@ -748,7 +749,7 @@ def _values(self) -> np.ndarray: codes = self.codes[i] vals = index - if is_categorical_dtype(vals.dtype): + if isinstance(vals.dtype, CategoricalDtype): vals = cast("CategoricalIndex", vals) vals = vals._data._internal_get_values() @@ -3650,7 +3651,7 @@ def _convert_can_do_setop(self, other): @doc(Index.astype) def astype(self, dtype, copy: bool = True): dtype = pandas_dtype(dtype) - if is_categorical_dtype(dtype): + if isinstance(dtype, CategoricalDtype): msg = "> 1 ndim Categorical are not supported at this time" raise NotImplementedError(msg) if not is_object_dtype(dtype): @@ -3852,13 +3853,13 @@ def sparsify_labels(label_list, start: int = 
0, sentinel: object = ""): return list(zip(*result)) -def _get_na_rep(dtype) -> str: - if is_extension_array_dtype(dtype): +def _get_na_rep(dtype: DtypeObj) -> str: + if isinstance(dtype, ExtensionDtype): return f"{dtype.na_value}" else: - dtype = dtype.type + dtype_type = dtype.type - return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN") + return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype_type, "NaN") def maybe_droplevels(index: Index, key) -> Index: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 311729d3dc00a..f48b044ff0016 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -58,14 +58,13 @@ is_1d_only_ea_dtype, is_1d_only_ea_obj, is_dtype_equal, - is_interval_dtype, is_list_like, - is_sparse, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, + IntervalDtype, PandasDtype, PeriodDtype, ) @@ -106,6 +105,7 @@ PeriodArray, TimedeltaArray, ) +from pandas.core.arrays.sparse.dtype import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation import expressions @@ -1620,7 +1620,7 @@ def setitem(self, indexer, value, using_cow: bool = False): except (ValueError, TypeError) as err: _catch_deprecated_value_error(err) - if is_interval_dtype(self.dtype): + if isinstance(self.dtype, IntervalDtype): # see TestSetitemFloatIntervalWithIntIntervalValues nb = self.coerce_to_target_dtype(orig_value) return nb.setitem(orig_indexer, orig_value) @@ -1665,7 +1665,7 @@ def where( _catch_deprecated_value_error(err) if self.ndim == 1 or self.shape[0] == 1: - if is_interval_dtype(self.dtype): + if isinstance(self.dtype, IntervalDtype): # TestSetitemFloatIntervalWithIntIntervalValues blk = self.coerce_to_target_dtype(orig_other) nbs = blk.where(orig_other, orig_cond, using_cow=using_cow) @@ -1740,7 +1740,7 @@ def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: 
_catch_deprecated_value_error(err) if self.ndim == 1 or self.shape[0] == 1: - if is_interval_dtype(self.dtype): + if isinstance(self.dtype, IntervalDtype): # Discussion about what we want to support in the general # case GH#39584 blk = self.coerce_to_target_dtype(orig_new) @@ -1848,7 +1848,7 @@ def fillna( downcast=None, using_cow: bool = False, ) -> list[Block]: - if is_interval_dtype(self.dtype): + if isinstance(self.dtype, IntervalDtype): # Block.fillna handles coercion (test_fillna_interval) return super().fillna( value=value, @@ -2517,7 +2517,7 @@ def to_native_types( results_converted.append(result.astype(object, copy=False)) return np.vstack(results_converted) - elif values.dtype.kind == "f" and not is_sparse(values): + elif values.dtype.kind == "f" and not isinstance(values.dtype, SparseDtype): # see GH#13418: no special formatting is desired at the # output (important for appropriate 'quoting' behaviour), # so do not pass it through the FloatArrayFormatter diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index bcf0b77dab9b2..34e3ce92698cb 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -30,7 +30,6 @@ is_bool_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, - is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, @@ -283,7 +282,7 @@ def ndarray_to_mgr( return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ) - elif is_extension_array_dtype(vdtype): + elif isinstance(vdtype, ExtensionDtype): # i.e. 
Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype) # are already caught above values = extract_array(values, extract_numpy=True) diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 54ae217990d96..0ce6a86d98403 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -26,13 +26,13 @@ is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, - is_extension_array_dtype, is_integer, is_list_like, is_numeric_dtype, is_scalar, is_timedelta64_dtype, ) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna @@ -498,7 +498,7 @@ def _coerce_to_type(x): # Will properly support in the future. # https://github.com/pandas-dev/pandas/pull/31290 # https://github.com/pandas-dev/pandas/issues/31389 - elif is_extension_array_dtype(x.dtype) and is_numeric_dtype(x.dtype): + elif isinstance(x.dtype, ExtensionDtype) and is_numeric_dtype(x.dtype): x = x.to_numpy(dtype=np.float64, na_value=np.nan) if dtype is not None: diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index dc89c11bef231..fc12a8b0722e6 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -51,10 +51,8 @@ from pandas._libs.tslibs.nattype import NaTType from pandas.core.dtypes.common import ( - is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, - is_extension_array_dtype, is_float, is_float_dtype, is_integer, @@ -64,7 +62,11 @@ is_scalar, is_timedelta64_dtype, ) -from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, +) from pandas.core.dtypes.missing import ( isna, notna, @@ -355,7 +357,7 @@ def _get_footer(self) -> str: # level infos are added to the end and in a new line, like it is done # for Categoricals - if is_categorical_dtype(self.tr_series.dtype): + if isinstance(self.tr_series.dtype, 
CategoricalDtype): level_info = self.tr_series._values._repr_categories_info() if footer: footer += "\n" @@ -1294,7 +1296,7 @@ def format_array( fmt_klass = Datetime64TZFormatter elif is_timedelta64_dtype(values.dtype): fmt_klass = Timedelta64Formatter - elif is_extension_array_dtype(values.dtype): + elif isinstance(values.dtype, ExtensionDtype): fmt_klass = ExtensionArrayFormatter elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype): fmt_klass = FloatArrayFormatter
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52213
2023-03-26T01:10:07Z
2023-03-27T21:01:19Z
2023-03-27T21:01:19Z
2023-03-27T21:02:26Z
API/BUG: infer_dtype_from_scalar with non-nano
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index e1ac9e3309de7..703322488d328 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -317,6 +317,7 @@ Datetimelike - Bug in :func:`date_range` when ``freq`` was a :class:`DateOffset` with ``nanoseconds`` (:issue:`46877`) - Bug in :meth:`Timestamp.round` with values close to the implementation bounds returning incorrect results instead of raising ``OutOfBoundsDatetime`` (:issue:`51494`) - Bug in :meth:`arrays.DatetimeArray.map` and :meth:`DatetimeIndex.map`, where the supplied callable operated array-wise instead of element-wise (:issue:`51977`) +- Bug in constructing a :class:`Series` or :class:`DataFrame` from a datetime or timedelta scalar always inferring nanosecond resolution instead of inferring from the input (:issue:`52212`) - Bug in parsing datetime strings with weekday but no day e.g. "2023 Sept Thu" incorrectly raising ``AttributeError`` instead of ``ValueError`` (:issue:`52659`) - diff --git a/pandas/conftest.py b/pandas/conftest.py index 86f0121dd00a9..c24a56493b519 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -931,7 +931,7 @@ def rand_series_with_duplicate_datetimeindex() -> Series: (Period("2012-02-01", freq="D"), "period[D]"), ( Timestamp("2011-01-01", tz="US/Eastern"), - DatetimeTZDtype(tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), ), (Timedelta(seconds=500), "timedelta64[ns]"), ] diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5f859d1bc6ee6..e7a6692807685 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -645,7 +645,18 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): if inferred == dtype: return dtype, fv - return np.dtype("object"), fill_value + elif inferred.kind == "m": + # different unit, e.g. 
passed np.timedelta64(24, "h") with dtype=m8[ns] + # see if we can losslessly cast it to our dtype + unit = np.datetime_data(dtype)[0] + try: + td = Timedelta(fill_value).as_unit(unit, round_ok=False) + except OutOfBoundsTimedelta: + return _dtype_obj, fill_value + else: + return dtype, td.asm8 + + return _dtype_obj, fill_value elif is_float(fill_value): if issubclass(dtype.type, np.bool_): @@ -775,8 +786,6 @@ def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: elif isinstance(val, (np.datetime64, dt.datetime)): try: val = Timestamp(val) - if val is not NaT: - val = val.as_unit("ns") except OutOfBoundsDatetime: return _dtype_obj, val @@ -785,7 +794,7 @@ def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: dtype = val.dtype # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes else: - dtype = DatetimeTZDtype(unit="ns", tz=val.tz) + dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz) elif isinstance(val, (np.timedelta64, dt.timedelta)): try: @@ -793,8 +802,11 @@ def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: except (OutOfBoundsTimedelta, OverflowError): dtype = _dtype_obj else: - dtype = np.dtype("m8[ns]") - val = np.timedelta64(val.value, "ns") + if val is NaT: + val = np.timedelta64("NaT", "ns") + else: + val = val.asm8 + dtype = val.dtype elif is_bool(val): dtype = np.dtype(np.bool_) diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py index 53d0656a11f81..b5d761b3549fa 100644 --- a/pandas/tests/dtypes/cast/test_infer_dtype.py +++ b/pandas/tests/dtypes/cast/test_infer_dtype.py @@ -61,17 +61,31 @@ def test_infer_dtype_from_complex(complex_dtype): assert dtype == np.complex_ -@pytest.mark.parametrize( - "data", [np.datetime64(1, "ns"), Timestamp(1), datetime(2000, 1, 1, 0, 0)] -) -def test_infer_dtype_from_datetime(data): - dtype, val = infer_dtype_from_scalar(data) +def test_infer_dtype_from_datetime(): + dt64 = np.datetime64(1, "ns") + dtype, val = infer_dtype_from_scalar(dt64) 
assert dtype == "M8[ns]" + ts = Timestamp(1) + dtype, val = infer_dtype_from_scalar(ts) + assert dtype == "M8[ns]" -@pytest.mark.parametrize("data", [np.timedelta64(1, "ns"), Timedelta(1), timedelta(1)]) -def test_infer_dtype_from_timedelta(data): - dtype, val = infer_dtype_from_scalar(data) + dt = datetime(2000, 1, 1, 0, 0) + dtype, val = infer_dtype_from_scalar(dt) + assert dtype == "M8[us]" + + +def test_infer_dtype_from_timedelta(): + td64 = np.timedelta64(1, "ns") + dtype, val = infer_dtype_from_scalar(td64) + assert dtype == "m8[ns]" + + pytd = timedelta(1) + dtype, val = infer_dtype_from_scalar(pytd) + assert dtype == "m8[us]" + + td = Timedelta(1) + dtype, val = infer_dtype_from_scalar(td) assert dtype == "m8[ns]" @@ -140,9 +154,9 @@ def test_infer_dtype_from_scalar_errors(): (b"foo", np.object_), (1, np.int64), (1.5, np.float_), - (np.datetime64("2016-01-01"), np.dtype("M8[ns]")), - (Timestamp("20160101"), np.dtype("M8[ns]")), - (Timestamp("20160101", tz="UTC"), "datetime64[ns, UTC]"), + (np.datetime64("2016-01-01"), np.dtype("M8[s]")), + (Timestamp("20160101"), np.dtype("M8[s]")), + (Timestamp("20160101", tz="UTC"), "datetime64[s, UTC]"), ], ) def test_infer_dtype_from_scalar(value, expected): diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 224abbcef27df..c399e6fc65bc7 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -819,7 +819,7 @@ def test_setitem_single_column_mixed_datetime(self): # check our dtypes result = df.dtypes expected = Series( - [np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")], + [np.dtype("float64")] * 3 + [np.dtype("datetime64[s]")], index=["foo", "bar", "baz", "timestamp"], ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 3edfd47cb05a1..b745575876212 100644 --- 
a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -154,7 +154,7 @@ def test_setitem_dt64_index_empty_columns(self): def test_setitem_timestamp_empty_columns(self): # GH#19843 df = DataFrame(index=range(3)) - df["now"] = Timestamp("20130101", tz="UTC") + df["now"] = Timestamp("20130101", tz="UTC").as_unit("ns") expected = DataFrame( [[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"] @@ -234,7 +234,7 @@ def test_setitem_dict_preserves_dtypes(self): (Interval(left=0, right=5), IntervalDtype("int64", "right")), ( Timestamp("2011-01-01", tz="US/Eastern"), - DatetimeTZDtype(tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), ), ], ) diff --git a/pandas/tests/frame/methods/test_get_numeric_data.py b/pandas/tests/frame/methods/test_get_numeric_data.py index bed611b3a969e..ec1c768603a59 100644 --- a/pandas/tests/frame/methods/test_get_numeric_data.py +++ b/pandas/tests/frame/methods/test_get_numeric_data.py @@ -21,7 +21,7 @@ def test_get_numeric_data_preserve_dtype(self): tm.assert_frame_equal(result, expected) def test_get_numeric_data(self): - datetime64name = np.dtype("M8[ns]").name + datetime64name = np.dtype("M8[s]").name objectname = np.dtype(np.object_).name df = DataFrame( diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index a96dec5f34ce1..1ed0143e5b309 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -8,6 +8,10 @@ import pytest from pandas._libs.tslibs.timezones import dateutil_gettz as gettz +from pandas.compat import ( + IS64, + is_platform_windows, +) import pandas.util._test_decorators as td import pandas as pd @@ -118,6 +122,11 @@ class TestDataFrameSelectReindex: # These are specific reindex-based tests; other indexing tests should go in # test_indexing + @pytest.mark.xfail( + not IS64 or is_platform_windows(), + reason="Passes int32 values to DatetimeArray in 
make_na_array on " + "windows, 32bit linux builds", + ) @td.skip_array_manager_not_yet_implemented def test_reindex_tzaware_fill_value(self): # GH#52586 @@ -125,8 +134,9 @@ def test_reindex_tzaware_fill_value(self): ts = pd.Timestamp("2023-04-10 17:32", tz="US/Pacific") res = df.reindex([0, 1], axis=1, fill_value=ts) - assert res.dtypes[1] == pd.DatetimeTZDtype(tz="US/Pacific") + assert res.dtypes[1] == pd.DatetimeTZDtype(unit="s", tz="US/Pacific") expected = DataFrame({0: [1], 1: [ts]}) + expected[1] = expected[1].astype(res.dtypes[1]) tm.assert_frame_equal(res, expected) per = ts.tz_localize(None).to_period("s") @@ -137,8 +147,9 @@ def test_reindex_tzaware_fill_value(self): interval = pd.Interval(ts, ts + pd.Timedelta(seconds=1)) res = df.reindex([0, 1], axis=1, fill_value=interval) - assert res.dtypes[1] == pd.IntervalDtype("datetime64[ns, US/Pacific]", "right") + assert res.dtypes[1] == pd.IntervalDtype("datetime64[s, US/Pacific]", "right") expected = DataFrame({0: [1], 1: [interval]}) + expected[1] = expected[1].astype(res.dtypes[1]) tm.assert_frame_equal(res, expected) def test_reindex_copies(self): diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index b44b05f9f8153..5671a569c8ac8 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -656,7 +656,9 @@ def create_cols(name): "foo", index=df_float.index, columns=create_cols("object") ) df_dt = DataFrame( - Timestamp("20010101"), index=df_float.index, columns=create_cols("date") + Timestamp("20010101").as_unit("ns"), + index=df_float.index, + columns=create_cols("date"), ) # add in some nans @@ -664,6 +666,7 @@ def create_cols(name): # ## this is a bug in read_csv right now #### # df_dt.loc[30:50,1:3] = np.nan + # FIXME: don't leave commented-out df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1) @@ -702,7 +705,9 @@ def test_to_csv_dups_cols(self): df_int = DataFrame(np.random.randn(1000, 
3)).astype("int64") df_bool = DataFrame(True, index=df_float.index, columns=range(3)) df_object = DataFrame("foo", index=df_float.index, columns=range(3)) - df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3)) + df_dt = DataFrame( + Timestamp("20010101").as_unit("ns"), index=df_float.index, columns=range(3) + ) df = pd.concat( [df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True ) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 0ddcbf87e3b4c..3ad5c304d9a30 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -191,20 +191,20 @@ def test_construction_with_mixed(self, float_string_frame): # check dtypes result = df.dtypes - expected = Series({"datetime64[ns]": 3}) + expected = Series({"datetime64[us]": 3}) # mixed-type frames float_string_frame["datetime"] = datetime.now() float_string_frame["timedelta"] = timedelta(days=1, seconds=1) - assert float_string_frame["datetime"].dtype == "M8[ns]" - assert float_string_frame["timedelta"].dtype == "m8[ns]" + assert float_string_frame["datetime"].dtype == "M8[us]" + assert float_string_frame["timedelta"].dtype == "m8[us]" result = float_string_frame.dtypes expected = Series( [np.dtype("float64")] * 4 + [ np.dtype("object"), - np.dtype("datetime64[ns]"), - np.dtype("timedelta64[ns]"), + np.dtype("datetime64[us]"), + np.dtype("timedelta64[us]"), ], index=list("ABCD") + ["foo", "datetime", "timedelta"], ) @@ -230,7 +230,7 @@ def test_construction_with_conversions(self): }, index=range(3), ) - assert expected.dtypes["dt1"] == "M8[ns]" + assert expected.dtypes["dt1"] == "M8[s]" assert expected.dtypes["dt2"] == "M8[s]" df = DataFrame(index=range(3)) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 5c1fa5483555b..47e307f561cf4 100644 --- a/pandas/tests/frame/test_constructors.py +++ 
b/pandas/tests/frame/test_constructors.py @@ -97,6 +97,7 @@ def test_constructor_from_2d_datetimearray(self, using_array_manager): def test_constructor_dict_with_tzaware_scalar(self): # GH#42505 dt = Timestamp("2019-11-03 01:00:00-0700").tz_convert("America/Los_Angeles") + dt = dt.as_unit("ns") df = DataFrame({"dt": dt}, index=[0]) expected = DataFrame({"dt": [dt]}) @@ -926,7 +927,7 @@ def test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype): (Interval(left=0, right=5), IntervalDtype("int64", "right")), ( Timestamp("2011-01-01", tz="US/Eastern"), - DatetimeTZDtype(tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), ), ], ) @@ -1323,7 +1324,7 @@ def test_constructor_unequal_length_nested_list_column(self): [[Timestamp("2021-01-01")]], [{"x": Timestamp("2021-01-01")}], {"x": [Timestamp("2021-01-01")]}, - {"x": Timestamp("2021-01-01")}, + {"x": Timestamp("2021-01-01").as_unit("ns")}, ], ) def test_constructor_one_element_data_list(self, data): @@ -1814,7 +1815,6 @@ def test_constructor_single_value(self): def test_constructor_with_datetimes(self): intname = np.dtype(np.int_).name floatname = np.dtype(np.float_).name - datetime64name = np.dtype("M8[ns]").name objectname = np.dtype(np.object_).name # single item @@ -1832,7 +1832,7 @@ def test_constructor_with_datetimes(self): expected = Series( [np.dtype("int64")] + [np.dtype(objectname)] * 2 - + [np.dtype(datetime64name)] * 2, + + [np.dtype("M8[s]"), np.dtype("M8[us]")], index=list("ABCDE"), ) tm.assert_series_equal(result, expected) @@ -1912,7 +1912,7 @@ def test_constructor_with_datetimes3(self): df = DataFrame({"End Date": dt}, index=[0]) assert df.iat[0, 0] == dt tm.assert_series_equal( - df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"}) + df.dtypes, Series({"End Date": "datetime64[us, US/Eastern]"}) ) df = DataFrame([{"End Date": dt}]) @@ -3047,15 +3047,22 @@ def test_from_scalar_datetimelike_mismatched(self, constructor, cls): with pytest.raises(TypeError, match=msg): 
constructor(scalar, dtype=dtype) - @pytest.mark.xfail( - reason="Timestamp constructor has been updated to cast dt64 to non-nano, " - "but DatetimeArray._from_sequence has not" - ) @pytest.mark.parametrize("cls", [datetime, np.datetime64]) - def test_from_out_of_bounds_ns_datetime(self, constructor, cls): + def test_from_out_of_bounds_ns_datetime( + self, constructor, cls, request, box, frame_or_series + ): # scalar that won't fit in nanosecond dt64, but will fit in microsecond + if box is list or (frame_or_series is Series and box is dict): + mark = pytest.mark.xfail( + reason="Timestamp constructor has been updated to cast dt64 to " + "non-nano, but DatetimeArray._from_sequence has not", + strict=True, + ) + request.node.add_marker(mark) + scalar = datetime(9999, 1, 1) exp_dtype = "M8[us]" # pydatetime objects default to this reso + if cls is np.datetime64: scalar = np.datetime64(scalar, "D") exp_dtype = "M8[s]" # closest reso to input @@ -3076,13 +3083,19 @@ def test_out_of_s_bounds_datetime64(self, constructor): dtype = tm.get_dtype(result) assert dtype == object - @pytest.mark.xfail( - reason="TimedeltaArray constructor has been updated to cast td64 to non-nano, " - "but TimedeltaArray._from_sequence has not" - ) @pytest.mark.parametrize("cls", [timedelta, np.timedelta64]) - def test_from_out_of_bounds_ns_timedelta(self, constructor, cls): + def test_from_out_of_bounds_ns_timedelta( + self, constructor, cls, request, box, frame_or_series + ): # scalar that won't fit in nanosecond td64, but will fit in microsecond + if box is list or (frame_or_series is Series and box is dict): + mark = pytest.mark.xfail( + reason="TimedeltaArray constructor has been updated to cast td64 " + "to non-nano, but TimedeltaArray._from_sequence has not", + strict=True, + ) + request.node.add_marker(mark) + scalar = datetime(9999, 1, 1) - datetime(1970, 1, 1) exp_dtype = "m8[us]" # smallest reso that fits if cls is np.timedelta64: diff --git a/pandas/tests/groupby/test_apply.py 
b/pandas/tests/groupby/test_apply.py index 0cdb11cfbf6e0..79614e6beccaf 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -721,7 +721,9 @@ def func_with_date(batch): dfg_no_conversion_expected.index.name = "a" dfg_conversion = df.groupby(by=["a"]).apply(func_with_date) - dfg_conversion_expected = DataFrame({"b": datetime(2015, 1, 1), "c": 2}, index=[1]) + dfg_conversion_expected = DataFrame( + {"b": pd.Timestamp(2015, 1, 1).as_unit("ns"), "c": 2}, index=[1] + ) dfg_conversion_expected.index.name = "a" tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected) diff --git a/pandas/tests/groupby/test_groupby_shift_diff.py b/pandas/tests/groupby/test_groupby_shift_diff.py index 7ffee412e3cdf..656471b2f6eb0 100644 --- a/pandas/tests/groupby/test_groupby_shift_diff.py +++ b/pandas/tests/groupby/test_groupby_shift_diff.py @@ -62,7 +62,7 @@ def test_group_shift_with_fill_value(): def test_group_shift_lose_timezone(): # GH 30134 - now_dt = Timestamp.utcnow() + now_dt = Timestamp.utcnow().as_unit("ns") df = DataFrame({"a": [1, 1], "date": now_dt}) result = df.groupby("a").shift(0).iloc[0] expected = Series({"date": now_dt}, name=result.name) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index f9a1349081529..cfbecd3efd07e 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -715,7 +715,8 @@ def test_groupby_max_datetime64(self): # GH 5869 # datetimelike dtype conversion from int df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)}) - expected = df.groupby("A")["A"].apply(lambda x: x.max()) + # TODO: can we retain second reso in .apply here? 
+ expected = df.groupby("A")["A"].apply(lambda x: x.max()).astype("M8[s]") result = df.groupby("A")["A"].max() tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 04e6f5d2fdcaa..d0e1343fbeb54 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -279,7 +279,9 @@ def test_transform_datetime_to_timedelta(): # GH 15429 # transforming a datetime to timedelta df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)}) - expected = Series([Timestamp("20130101") - Timestamp("20130101")] * 5, name="A") + expected = Series( + Timestamp("20130101") - Timestamp("20130101"), index=range(5), name="A" + ) # this does date math without changing result type in transform base_time = df["A"][0] diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index e93cd836fa307..89190dae46169 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -446,7 +446,7 @@ def test_v12_compat(self, datapath): columns=["A", "B", "C", "D"], index=dti, ) - df["date"] = Timestamp("19920106 18:21:32.12") + df["date"] = Timestamp("19920106 18:21:32.12").as_unit("ns") df.iloc[3, df.columns.get_loc("date")] = Timestamp("20130101") df["modified"] = df["date"] df.iloc[1, df.columns.get_loc("modified")] = pd.NaT @@ -751,7 +751,7 @@ def test_axis_dates(self, datetime_series, datetime_frame): def test_convert_dates(self, datetime_series, datetime_frame): # frame df = datetime_frame - df["date"] = Timestamp("20130101") + df["date"] = Timestamp("20130101").as_unit("ns") json = df.to_json() result = read_json(json) @@ -767,7 +767,7 @@ def test_convert_dates(self, datetime_series, datetime_frame): tm.assert_frame_equal(result, expected) # series - ts = Series(Timestamp("20130101"), index=datetime_series.index) + ts = Series(Timestamp("20130101").as_unit("ns"), 
index=datetime_series.index) json = ts.to_json() result = read_json(json, typ="series") tm.assert_series_equal(result, ts) @@ -831,7 +831,7 @@ def test_convert_dates_infer(self, infer_word): def test_date_format_frame(self, date, date_unit, datetime_frame): df = datetime_frame - df["date"] = Timestamp(date) + df["date"] = Timestamp(date).as_unit("ns") df.iloc[1, df.columns.get_loc("date")] = pd.NaT df.iloc[5, df.columns.get_loc("date")] = pd.NaT if date_unit: @@ -859,7 +859,7 @@ def test_date_format_frame_raises(self, datetime_frame): ], ) def test_date_format_series(self, date, date_unit, datetime_series): - ts = Series(Timestamp(date), index=datetime_series.index) + ts = Series(Timestamp(date).as_unit("ns"), index=datetime_series.index) ts.iloc[1] = pd.NaT ts.iloc[5] = pd.NaT if date_unit: @@ -879,7 +879,7 @@ def test_date_format_series_raises(self, datetime_series): @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) def test_date_unit(self, unit, datetime_frame): df = datetime_frame - df["date"] = Timestamp("20130101 20:43:42") + df["date"] = Timestamp("20130101 20:43:42").as_unit("ns") dl = df.columns.get_loc("date") df.iloc[1, dl] = Timestamp("19710101 20:43:42") df.iloc[2, dl] = Timestamp("21460101 20:43:42") diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 81de4f13de81d..13d2c79025d1f 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -746,7 +746,12 @@ def test_date_parser_int_bug(all_parsers): def test_nat_parse(all_parsers): # see gh-3062 parser = all_parsers - df = DataFrame({"A": np.arange(10, dtype="float64"), "B": Timestamp("20010101")}) + df = DataFrame( + { + "A": np.arange(10, dtype="float64"), + "B": Timestamp("20010101").as_unit("ns"), + } + ) df.iloc[3:6, :] = np.nan with tm.ensure_clean("__nat_parse_.csv") as path: @@ -1902,7 +1907,9 @@ def test_date_parser_multiindex_columns(all_parsers): 1,2 2019-12-31,6""" result = 
parser.read_csv(StringIO(data), parse_dates=[("a", "1")], header=[0, 1]) - expected = DataFrame({("a", "1"): Timestamp("2019-12-31"), ("b", "2"): [6]}) + expected = DataFrame( + {("a", "1"): Timestamp("2019-12-31").as_unit("ns"), ("b", "2"): [6]} + ) tm.assert_frame_equal(result, expected) @@ -1924,7 +1931,9 @@ def test_date_parser_multiindex_columns_combine_cols(all_parsers, parse_spec, co parse_dates=parse_spec, header=[0, 1], ) - expected = DataFrame({col_name: Timestamp("2019-12-31"), ("c", "3"): [6]}) + expected = DataFrame( + {col_name: Timestamp("2019-12-31").as_unit("ns"), ("c", "3"): [6]} + ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py index f818d621c744f..32231cbbdda64 100644 --- a/pandas/tests/io/parser/usecols/test_parse_dates.py +++ b/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -88,7 +88,7 @@ def test_usecols_with_parse_dates3(all_parsers): parse_dates = [0] cols = { - "a": Timestamp("2016-09-21"), + "a": Timestamp("2016-09-21").as_unit("ns"), "b": [1], "c": [1], "d": [2], diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index c37e68f537ebb..b31a520924d5f 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -146,8 +146,8 @@ def test_append_some_nans(setup_path): "A2": np.random.randn(20), "B": "foo", "C": "bar", - "D": Timestamp("20010101"), - "E": datetime.datetime(2001, 1, 2, 0, 0), + "D": Timestamp("2001-01-01").as_unit("ns"), + "E": Timestamp("2001-01-02").as_unit("ns"), }, index=np.arange(20), ) @@ -247,8 +247,8 @@ def test_append_all_nans(setup_path): "A2": np.random.randn(20), "B": "foo", "C": "bar", - "D": Timestamp("20010101"), - "E": datetime.datetime(2001, 1, 2, 0, 0), + "D": Timestamp("2001-01-01").as_unit("ns"), + "E": Timestamp("2001-01-02").as_unit("ns"), }, index=np.arange(20), ) @@ -572,7 +572,7 @@ def check_col(key, 
name, size): df_dc.loc[df_dc.index[4:6], "string"] = np.nan df_dc.loc[df_dc.index[7:9], "string"] = "bar" df_dc["string2"] = "cool" - df_dc["datetime"] = Timestamp("20010102") + df_dc["datetime"] = Timestamp("20010102").as_unit("ns") df_dc.loc[df_dc.index[3:5], ["A", "B", "datetime"]] = np.nan _maybe_remove(store, "df_dc") @@ -654,8 +654,8 @@ def test_append_misc_chunksize(setup_path, chunksize): df["float322"] = 1.0 df["float322"] = df["float322"].astype("float32") df["bool"] = df["float322"] > 0 - df["time1"] = Timestamp("20130101") - df["time2"] = Timestamp("20130102") + df["time1"] = Timestamp("20130101").as_unit("ns") + df["time2"] = Timestamp("20130102").as_unit("ns") with ensure_clean_store(setup_path, mode="w") as store: store.append("obj", df, chunksize=chunksize) result = store.select("obj") @@ -767,12 +767,11 @@ def test_append_with_timedelta(setup_path): # GH 3577 # append timedelta + ts = Timestamp("20130101").as_unit("ns") df = DataFrame( { - "A": Timestamp("20130101"), - "B": [ - Timestamp("20130101") + timedelta(days=i, seconds=10) for i in range(10) - ], + "A": ts, + "B": [ts + timedelta(days=i, seconds=10) for i in range(10)], } ) df["C"] = df["A"] - df["B"] diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py index d2b0519d6cf3d..910f83e0b997c 100644 --- a/pandas/tests/io/pytables/test_put.py +++ b/pandas/tests/io/pytables/test_put.py @@ -183,10 +183,10 @@ def test_put_mixed_type(setup_path): df["bool3"] = True df["int1"] = 1 df["int2"] = 2 - df["timestamp1"] = Timestamp("20010102") - df["timestamp2"] = Timestamp("20010103") - df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0) - df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0) + df["timestamp1"] = Timestamp("20010102").as_unit("ns") + df["timestamp2"] = Timestamp("20010103").as_unit("ns") + df["datetime1"] = Timestamp("20010102").as_unit("ns") + df["datetime2"] = Timestamp("20010103").as_unit("ns") df.loc[df.index[3:6], ["obj1"]] = np.nan df = 
df._consolidate() diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 7a5b6ddd40334..2d87b719af36b 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -422,10 +422,10 @@ def test_table_mixed_dtypes(setup_path): df["bool3"] = True df["int1"] = 1 df["int2"] = 2 - df["timestamp1"] = Timestamp("20010102") - df["timestamp2"] = Timestamp("20010103") - df["datetime1"] = dt.datetime(2001, 1, 2, 0, 0) - df["datetime2"] = dt.datetime(2001, 1, 3, 0, 0) + df["timestamp1"] = Timestamp("20010102").as_unit("ns") + df["timestamp2"] = Timestamp("20010103").as_unit("ns") + df["datetime1"] = Timestamp("20010102").as_unit("ns") + df["datetime2"] = Timestamp("20010103").as_unit("ns") df.loc[df.index[3:6], ["obj1"]] = np.nan df = df._consolidate() diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py index 7589eb8e96a10..e6c0c918a73cc 100644 --- a/pandas/tests/io/pytables/test_timezones.py +++ b/pandas/tests/io/pytables/test_timezones.py @@ -50,7 +50,7 @@ def test_append_with_timezones(setup_path, gettz): df_est = DataFrame( { "A": [ - Timestamp("20130102 2:00:00", tz=gettz("US/Eastern")) + Timestamp("20130102 2:00:00", tz=gettz("US/Eastern")).as_unit("ns") + timedelta(hours=1) * i for i in range(5) ] @@ -61,24 +61,24 @@ def test_append_with_timezones(setup_path, gettz): # of DST transition df_crosses_dst = DataFrame( { - "A": Timestamp("20130102", tz=gettz("US/Eastern")), - "B": Timestamp("20130603", tz=gettz("US/Eastern")), + "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"), + "B": Timestamp("20130603", tz=gettz("US/Eastern")).as_unit("ns"), }, index=range(5), ) df_mixed_tz = DataFrame( { - "A": Timestamp("20130102", tz=gettz("US/Eastern")), - "B": Timestamp("20130102", tz=gettz("EET")), + "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"), + "B": Timestamp("20130102", tz=gettz("EET")).as_unit("ns"), }, 
index=range(5), ) df_different_tz = DataFrame( { - "A": Timestamp("20130102", tz=gettz("US/Eastern")), - "B": Timestamp("20130102", tz=gettz("CET")), + "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"), + "B": Timestamp("20130102", tz=gettz("CET")).as_unit("ns"), }, index=range(5), ) @@ -303,8 +303,8 @@ def test_legacy_datetimetz_object(datapath): # 8260 expected = DataFrame( { - "A": Timestamp("20130102", tz="US/Eastern"), - "B": Timestamp("20130603", tz="CET"), + "A": Timestamp("20130102", tz="US/Eastern").as_unit("ns"), + "B": Timestamp("20130603", tz="CET").as_unit("ns"), }, index=range(5), ) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 7238232a46e60..9750e8d32c844 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1352,8 +1352,14 @@ def test_constructor_dict_order(self): expected = Series([1, 0, 2], index=list("bac")) tm.assert_series_equal(result, expected) - def test_constructor_dict_extension(self, ea_scalar_and_dtype): + def test_constructor_dict_extension(self, ea_scalar_and_dtype, request): ea_scalar, ea_dtype = ea_scalar_and_dtype + if isinstance(ea_scalar, Timestamp): + mark = pytest.mark.xfail( + reason="Construction from dict goes through " + "maybe_convert_objects which casts to nano" + ) + request.node.add_marker(mark) d = {"a": ea_scalar} result = Series(d, index=["a"]) expected = Series(ea_scalar, index=["a"], dtype=ea_dtype) @@ -1465,7 +1471,7 @@ def test_fromValue(self, datetime_series): d = datetime.now() dates = Series(d, index=datetime_series.index) - assert dates.dtype == "M8[ns]" + assert dates.dtype == "M8[us]" assert len(dates) == len(datetime_series) # GH12336
- [x] closes #51196 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Needs whatsnew and targeted tests, opening to see if there are opinions about doing this as a bugfix or waiting to do it as an API change in 3.0
https://api.github.com/repos/pandas-dev/pandas/pulls/52212
2023-03-26T00:41:49Z
2023-05-18T16:11:48Z
2023-05-18T16:11:48Z
2023-05-18T17:16:02Z
CI: Test pyarrow nightly instead of intermediate versions
diff --git a/.github/actions/setup-conda/action.yml b/.github/actions/setup-conda/action.yml index efc31bba88f28..329dc24d466b4 100644 --- a/.github/actions/setup-conda/action.yml +++ b/.github/actions/setup-conda/action.yml @@ -9,20 +9,9 @@ inputs: extra-specs: description: Extra packages to install required: false - pyarrow-version: - description: If set, overrides the PyArrow version in the Conda environment to the given string. - required: false runs: using: composite steps: - - name: Set Arrow version in ${{ inputs.environment-file }} to ${{ inputs.pyarrow-version }} - run: | - grep -q ' - pyarrow' ${{ inputs.environment-file }} - sed -i"" -e "s/ - pyarrow/ - pyarrow=${{ inputs.pyarrow-version }}/" ${{ inputs.environment-file }} - cat ${{ inputs.environment-file }} - shell: bash - if: ${{ inputs.pyarrow-version }} - - name: Install ${{ inputs.environment-file }} uses: mamba-org/provision-with-micromamba@v12 with: diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml index 15308d0c086f6..7ed5f5b90b959 100644 --- a/.github/workflows/macos-windows.yml +++ b/.github/workflows/macos-windows.yml @@ -52,7 +52,6 @@ jobs: uses: ./.github/actions/setup-conda with: environment-file: ci/deps/${{ matrix.env_file }} - pyarrow-version: ${{ matrix.os == 'macos-latest' && '9' || '' }} - name: Build Pandas uses: ./.github/actions/build_pandas diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 08dd09e57871b..97ca346142ec1 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -28,7 +28,6 @@ jobs: env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml, actions-311.yaml] # Prevent the include jobs from overriding other jobs pattern: [""] - pyarrow_version: ["8", "9", "10"] include: - name: "Downstream Compat" env_file: actions-38-downstream_compat.yaml @@ -76,21 +75,11 @@ jobs: # TODO(cython3): Re-enable once next-beta(after beta 1) comes out # There are some warnings failing the build 
with -werror pandas_ci: "0" - exclude: - - env_file: actions-38.yaml - pyarrow_version: "8" - - env_file: actions-38.yaml - pyarrow_version: "9" - - env_file: actions-39.yaml - pyarrow_version: "8" - - env_file: actions-39.yaml - pyarrow_version: "9" - - env_file: actions-310.yaml - pyarrow_version: "8" - - env_file: actions-310.yaml - pyarrow_version: "9" + - name: "Pyarrow Nightly" + env_file: actions-311-pyarrownightly.yaml + pattern: "not slow and not network and not single_cpu" fail-fast: false - name: ${{ matrix.name || format('{0} pyarrow={1} {2}', matrix.env_file, matrix.pyarrow_version, matrix.pattern) }} + name: ${{ matrix.name || matrix.env_file }} env: ENV_FILE: ci/deps/${{ matrix.env_file }} PATTERN: ${{ matrix.pattern }} @@ -108,7 +97,7 @@ jobs: COVERAGE: ${{ !contains(matrix.env_file, 'pypy') }} concurrency: # https://github.community/t/concurrecy-not-work-for-push/183068/7 - group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.pattern }}-${{ matrix.pyarrow_version || '' }}-${{ matrix.extra_apt || '' }}-${{ matrix.pandas_data_manager || '' }} + group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.pattern }}-${{ matrix.extra_apt || '' }}-${{ matrix.pandas_data_manager || '' }} cancel-in-progress: true services: @@ -167,7 +156,6 @@ jobs: uses: ./.github/actions/setup-conda with: environment-file: ${{ env.ENV_FILE }} - pyarrow-version: ${{ matrix.pyarrow_version }} - name: Build Pandas id: build diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index f40b555593f6b..47405b72476fd 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -41,7 +41,7 @@ dependencies: - psycopg2>=2.8.6 - pymysql>=1.0.2 - pytables>=3.6.1 - - pyarrow + - pyarrow>=7.0.0 - pyreadstat>=1.1.2 - python-snappy>=0.6.0 - pyxlsb>=1.0.8 diff --git a/ci/deps/actions-311-pyarrownightly.yaml b/ci/deps/actions-311-pyarrownightly.yaml new file mode 
100644 index 0000000000000..77e4fc9d2c2d9 --- /dev/null +++ b/ci/deps/actions-311-pyarrownightly.yaml @@ -0,0 +1,29 @@ +name: pandas-dev +channels: + - conda-forge +dependencies: + - python=3.11 + + # build dependencies + - versioneer[toml] + - cython>=0.29.33 + + # test dependencies + - pytest>=7.0.0 + - pytest-cov + - pytest-xdist>=2.2.0 + - hypothesis>=6.34.2 + - pytest-asyncio>=0.17.0 + + # required dependencies + - python-dateutil + - numpy + - pytz + - pip + + - pip: + - "tzdata>=2022.1" + - "--extra-index-url https://pypi.fury.io/arrow-nightlies/" + - "--prefer-binary" + - "--pre" + - "pyarrow" diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index fa08bdf438dff..9ebfb710e0abb 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -41,7 +41,7 @@ dependencies: - psycopg2>=2.8.6 - pymysql>=1.0.2 # - pytables>=3.8.0 # first version that supports 3.11 - - pyarrow + - pyarrow>=7.0.0 - pyreadstat>=1.1.2 - python-snappy>=0.6.0 - pyxlsb>=1.0.8 diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml index a9265bd84ee87..3ed2786b76896 100644 --- a/ci/deps/actions-38-downstream_compat.yaml +++ b/ci/deps/actions-38-downstream_compat.yaml @@ -39,7 +39,7 @@ dependencies: - openpyxl<3.1.1, >=3.0.7 - odfpy>=1.4.1 - psycopg2>=2.8.6 - - pyarrow + - pyarrow>=7.0.0 - pymysql>=1.0.2 - pyreadstat>=1.1.2 - pytables>=3.6.1 diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml index 27872514447a5..4060a837d1757 100644 --- a/ci/deps/actions-38.yaml +++ b/ci/deps/actions-38.yaml @@ -39,7 +39,7 @@ dependencies: - odfpy>=1.4.1 - pandas-gbq>=0.15.0 - psycopg2>=2.8.6 - - pyarrow + - pyarrow>=7.0.0 - pymysql>=1.0.2 - pyreadstat>=1.1.2 - pytables>=3.6.1 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 4b0575d8a3afd..53cd9c5635493 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -40,7 +40,7 @@ dependencies: - pandas-gbq>=0.15.0 - psycopg2>=2.8.6 - pymysql>=1.0.2 - - 
pyarrow + - pyarrow>=7.0.0 - pyreadstat>=1.1.2 - pytables>=3.6.1 - python-snappy>=0.6.0 diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml index c3d89e735ae37..2e4070fa82010 100644 --- a/ci/deps/circle-38-arm64.yaml +++ b/ci/deps/circle-38-arm64.yaml @@ -39,7 +39,7 @@ dependencies: - odfpy>=1.4.1 - pandas-gbq>=0.15.0 - psycopg2>=2.8.6 - - pyarrow + - pyarrow>=7.0.0 - pymysql>=1.0.2 # Not provided on ARM #- pyreadstat diff --git a/environment.yml b/environment.yml index f29ade1dc5173..5aa1fad2e51c7 100644 --- a/environment.yml +++ b/environment.yml @@ -42,7 +42,7 @@ dependencies: - odfpy>=1.4.1 - py - psycopg2>=2.8.6 - - pyarrow + - pyarrow>=7.0.0 - pymysql>=1.0.2 - pyreadstat>=1.1.2 - pytables>=3.6.1 diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 7791ca53a6447..30dfceb29155a 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -92,22 +92,18 @@ def _get_path_or_handle( if fs is not None: pa_fs = import_optional_dependency("pyarrow.fs", errors="ignore") fsspec = import_optional_dependency("fsspec", errors="ignore") - if pa_fs is None and fsspec is None: - raise ValueError( - f"filesystem must be a pyarrow or fsspec FileSystem, " - f"not a {type(fs).__name__}" - ) - elif (pa_fs is not None and not isinstance(fs, pa_fs.FileSystem)) and ( - fsspec is not None and not isinstance(fs, fsspec.spec.AbstractFileSystem) - ): + if pa_fs is not None and isinstance(fs, pa_fs.FileSystem): + if storage_options: + raise NotImplementedError( + "storage_options not supported with a pyarrow FileSystem." + ) + elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem): + pass + else: raise ValueError( f"filesystem must be a pyarrow or fsspec FileSystem, " f"not a {type(fs).__name__}" ) - elif pa_fs is not None and isinstance(fs, pa_fs.FileSystem) and storage_options: - raise NotImplementedError( - "storage_options not supported with a pyarrow FileSystem." 
- ) if is_fsspec_url(path_or_handle) and fs is None: if storage_options is None: pa = import_optional_dependency("pyarrow") diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index dd0b43c116266..7e4869589cee6 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -12,6 +12,7 @@ import pandas as pd import pandas._testing as tm from pandas.core.arrays.string_arrow import ArrowStringArray +from pandas.util.version import Version @pytest.fixture @@ -406,15 +407,14 @@ def test_fillna_args(dtype, request): arr.fillna(value=1) -@td.skip_if_no("pyarrow") def test_arrow_array(dtype): # protocol added in 0.15.0 - import pyarrow as pa + pa = pytest.importorskip("pyarrow") data = pd.array(["a", "b", "c"], dtype=dtype) arr = pa.array(data) expected = pa.array(list(data), type=pa.string(), from_pandas=True) - if dtype.storage == "pyarrow": + if dtype.storage == "pyarrow" and Version(pa.__version__) <= Version("11.0.0"): expected = pa.chunked_array(expected) assert arr.equals(expected) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index b55e97a4fe0ae..c74548bf63e06 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -1019,7 +1019,10 @@ def test_read_dtype_backend_pyarrow_config_index(self, pa): {"a": [1, 2]}, index=pd.Index([3, 4], name="test"), dtype="int64[pyarrow]" ) expected = df.copy() + import pyarrow + if Version(pyarrow.__version__) > Version("11.0.0"): + expected.index = expected.index.astype("int64[pyarrow]") check_round_trip( df, engine=pa, diff --git a/pandas/tests/util/test_show_versions.py b/pandas/tests/util/test_show_versions.py index 8ff78cc073acf..714588d179aef 100644 --- a/pandas/tests/util/test_show_versions.py +++ b/pandas/tests/util/test_show_versions.py @@ -65,7 +65,7 @@ def test_show_versions_console(capsys): assert re.search(r"numpy\s*:\s[0-9]+\..*\n", result) # check optional dependency 
- assert re.search(r"pyarrow\s*:\s([0-9\.]+|None)\n", result) + assert re.search(r"pyarrow\s*:\s([0-9]+.*|None)\n", result) def test_json_output_match(capsys, tmpdir): diff --git a/requirements-dev.txt b/requirements-dev.txt index 9c0bdc64d6e07..f3c9649a5a707 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -31,7 +31,7 @@ openpyxl<3.1.1, >=3.0.7 odfpy>=1.4.1 py psycopg2-binary>=2.8.6 -pyarrow +pyarrow>=7.0.0 pymysql>=1.0.2 pyreadstat>=1.1.2 tables>=3.6.1 diff --git a/scripts/validate_min_versions_in_sync.py b/scripts/validate_min_versions_in_sync.py index b6016a35e3dbb..e0182ebaaee60 100755 --- a/scripts/validate_min_versions_in_sync.py +++ b/scripts/validate_min_versions_in_sync.py @@ -37,7 +37,7 @@ YAML_PATH = pathlib.Path("ci/deps") ENV_PATH = pathlib.Path("environment.yml") EXCLUDE_DEPS = {"tzdata", "blosc"} -EXCLUSION_LIST = frozenset(["python=3.8[build=*_pypy]", "pyarrow"]) +EXCLUSION_LIST = frozenset(["python=3.8[build=*_pypy]"]) # pandas package is not available # in pre-commit environment sys.path.append("pandas/compat")
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52211
2023-03-25T21:49:10Z
2023-03-30T23:52:36Z
2023-03-30T23:52:35Z
2023-07-19T22:59:44Z
CI: Use auto pytest workers for Windows
diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml index 15308d0c086f6..7b66a7ef51853 100644 --- a/.github/workflows/macos-windows.yml +++ b/.github/workflows/macos-windows.yml @@ -17,6 +17,7 @@ env: PANDAS_CI: 1 PYTEST_TARGET: pandas PATTERN: "not slow and not db and not network and not single_cpu" + PYTEST_WORKERS: "auto" permissions: contents: read @@ -38,9 +39,6 @@ jobs: # https://github.community/t/concurrecy-not-work-for-push/183068/7 group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.os }} cancel-in-progress: true - env: - # GH 47443: PYTEST_WORKERS > 1 crashes Windows builds with memory related errors - PYTEST_WORKERS: ${{ matrix.os == 'macos-latest' && 'auto' || '1' }} steps: - name: Checkout
- [ ] closes #47443 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52210
2023-03-25T21:31:57Z
2023-03-27T22:14:44Z
null
2023-03-27T22:14:49Z
DOC: getting_started tutorials nbviewer broken link structure fixed
diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst index bff50bb1e4c2d..1220c915c3cbc 100644 --- a/doc/source/getting_started/tutorials.rst +++ b/doc/source/getting_started/tutorials.rst @@ -113,7 +113,7 @@ Various tutorials * `Wes McKinney's (pandas BDFL) blog <https://wesmckinney.com/archives.html>`_ * `Statistical analysis made easy in Python with SciPy and pandas DataFrames, by Randal Olson <http://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/>`_ * `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 <https://conference.scipy.org/scipy2013/tutorial_detail.php?id=109>`_ -* `Financial analysis in Python, by Thomas Wiecki <https://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_ +* `Financial analysis in Python, by Thomas Wiecki <https://nbviewer.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_ * `Intro to pandas data structures, by Greg Reda <http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/>`_ * `Pandas and Python: Top 10, by Manish Amde <https://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/>`_ * `Pandas DataFrames Tutorial, by Karlijn Willems <https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python>`_
- [x] closes #52208 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. [This link](https://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb) (currently on the documentation) is broken, given the change in nbviewer link structure from nbviewer.ipython.org to nbviewer.org. This PR corrects the link to help maintain pandas documentation. Hope this helps!
https://api.github.com/repos/pandas-dev/pandas/pulls/52209
2023-03-25T20:42:28Z
2023-03-26T03:48:34Z
2023-03-26T03:48:34Z
2023-03-26T20:49:59Z
DOC: Add replace & map to See Also section
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bef7022a7d10f..53cd9c6476c75 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9913,6 +9913,7 @@ def applymap( See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. + DataFrame.replace: Replace values given in `to_replace` with `value`. Examples -------- diff --git a/pandas/core/series.py b/pandas/core/series.py index 06c744c3e36fa..cc4ec4cf23683 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4247,6 +4247,7 @@ def map( See Also -------- Series.apply : For applying more complex functions on a Series. + Series.replace: Replace values given in `to_replace` with `value`. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 4297aa0f20fc9..f421ba448c97a 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -599,6 +599,8 @@ -------- {klass}.fillna : Fill NA values. {klass}.where : Replace values based on boolean condition. + DataFrame.applymap: Apply a function to a Dataframe elementwise. + Series.map: Map values of Series according to an input mapping or function. Series.str.replace : Simple string replacement. Notes
See PR title, adding some references.
https://api.github.com/repos/pandas-dev/pandas/pulls/52207
2023-03-25T18:56:56Z
2023-03-27T21:03:12Z
2023-03-27T21:03:12Z
2023-03-27T21:31:38Z
REF: simplify DataFrame.applymap
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bef7022a7d10f..6ee9b357170bd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9882,7 +9882,7 @@ def apply( return op.apply().__finalize__(self, method="apply") def applymap( - self, func: PythonFuncType, na_action: str | None = None, **kwargs + self, func: PythonFuncType, na_action: Literal["ignore"] | None = None, **kwargs ) -> DataFrame: """ Apply a function to a Dataframe elementwise. @@ -9955,14 +9955,10 @@ def applymap( raise ValueError( f"na_action must be 'ignore' or None. Got {repr(na_action)}" ) - ignore_na = na_action == "ignore" func = functools.partial(func, **kwargs) - # if we have a dtype == 'M8[ns]', provide boxed values - def infer(x): - if x.empty: - return lib.map_infer(x, func, ignore_na=ignore_na) - return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na) + def infer(x: Series) -> Series: + return x.map(func, na_action=na_action) return self.apply(infer).__finalize__(self, "applymap")
This PR clarifies that `DataFrame.applymap` is actually the same as calling `Series.map` on each column. Also, this will hit `ExtensionArray.map`, which the version in the main branch didn't.
https://api.github.com/repos/pandas-dev/pandas/pulls/52206
2023-03-25T18:56:19Z
2023-03-25T20:07:18Z
null
2023-05-20T19:54:26Z
POC: rollback_array
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 9718641e75f60..ea148def47fa8 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -2235,6 +2235,20 @@ cdef class YearOffset(SingleConstructorOffset): ) return shifted + @apply_array_wraps + def rollback_array(self, dtarr): + reso = get_unit_from_dtype(dtarr.dtype) + shifted = shift_quarters( + dtarr.view("i8"), + self.n, + self.month, + self._day_opt, + modby=12, + reso=reso, + roll=True, + ) + return shifted + cdef class BYearEnd(YearOffset): """ @@ -2406,6 +2420,20 @@ cdef class QuarterOffset(SingleConstructorOffset): ) return shifted + @apply_array_wraps + def rollback_array(self, dtarr): + reso = get_unit_from_dtype(dtarr.dtype) + shifted = shift_quarters( + dtarr.view("i8"), + self.n, + self.startingMonth, + self._day_opt, + modby=3, + reso=reso, + roll=True, + ) + return shifted + cdef class BQuarterEnd(QuarterOffset): """ @@ -2532,6 +2560,18 @@ cdef class MonthOffset(SingleConstructorOffset): shifted = shift_months(dtarr.view("i8"), self.n, self._day_opt, reso=reso) return shifted + @apply_array_wraps + def rollback_array(self, dtarr): + reso = get_unit_from_dtype(dtarr.dtype) + shifted = shift_months( + dtarr.view("i8"), + self.n, + self._day_opt, + reso=reso, + roll=True, + ) + return shifted + cpdef __setstate__(self, state): state.pop("_use_relativedelta", False) state.pop("offset", None) @@ -4310,6 +4350,7 @@ cdef ndarray shift_quarters( str day_opt, int modby=3, NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns, + bint roll=False, ): """ Given an int64 array representing nanosecond timestamps, shift all elements @@ -4353,13 +4394,26 @@ cdef ndarray shift_quarters( n = quarters months_since = (dts.month - q1start_month) % modby - n = _roll_qtrday(&dts, n, months_since, day_opt) + if roll: + if months_since == 0 and dts.day == get_day_of_month(&dts, day_opt): + # already on_offset + res_val = val + else: + n = _roll_qtrday(&dts, -1, 
months_since, day_opt) + + dts.year = year_add_months(dts, modby * n - months_since) + dts.month = month_add_months(dts, modby * n - months_since) + dts.day = get_day_of_month(&dts, day_opt) + + res_val = npy_datetimestruct_to_datetime(reso, &dts) + else: + n = _roll_qtrday(&dts, n, months_since, day_opt) - dts.year = year_add_months(dts, modby * n - months_since) - dts.month = month_add_months(dts, modby * n - months_since) - dts.day = get_day_of_month(&dts, day_opt) + dts.year = year_add_months(dts, modby * n - months_since) + dts.month = month_add_months(dts, modby * n - months_since) + dts.day = get_day_of_month(&dts, day_opt) - res_val = npy_datetimestruct_to_datetime(reso, &dts) + res_val = npy_datetimestruct_to_datetime(reso, &dts) # Analogous to: out[i] = res_val (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_val @@ -4376,6 +4430,7 @@ def shift_months( int months, str day_opt=None, NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns, + bint roll=False, ): """ Given an int64-based datetime index, shift all elements @@ -4413,11 +4468,23 @@ def shift_months( res_val = NPY_NAT else: pandas_datetime_to_datetimestruct(val, reso, &dts) - dts.year = year_add_months(dts, months) - dts.month = month_add_months(dts, months) + if roll: + if dts.day == get_days_in_month(dts.year, dts.month): + # i.e. 
we are on_offset + res_val = val + else: + # Roll back to the previous month + dts.year = year_add_months(dts, -1) + dts.month = month_add_months(dts, -1) + dts.day = get_days_in_month(dts.year, dts.month) + res_val = npy_datetimestruct_to_datetime(reso, &dts) - dts.day = min(dts.day, get_days_in_month(dts.year, dts.month)) - res_val = npy_datetimestruct_to_datetime(reso, &dts) + else: + dts.year = year_add_months(dts, months) + dts.month = month_add_months(dts, months) + + dts.day = min(dts.day, get_days_in_month(dts.year, dts.month)) + res_val = npy_datetimestruct_to_datetime(reso, &dts) # Analogous to: out[i] = res_val (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_val @@ -4435,15 +4502,26 @@ def shift_months( res_val = NPY_NAT else: pandas_datetime_to_datetimestruct(val, reso, &dts) - months_to_roll = months + if roll: + if dts.day == get_day_of_month(&dts, day_opt): + # i.e. we are already on_offset + res_val = val + else: + months_to_roll = _roll_qtrday(&dts, -1, 0, day_opt) + dts.year = year_add_months(dts, months_to_roll) + dts.month = month_add_months(dts, months_to_roll) + dts.day = get_day_of_month(&dts, day_opt) + + res_val = npy_datetimestruct_to_datetime(reso, &dts) - months_to_roll = _roll_qtrday(&dts, months_to_roll, 0, day_opt) + else: + months_to_roll = _roll_qtrday(&dts, months, 0, day_opt) - dts.year = year_add_months(dts, months_to_roll) - dts.month = month_add_months(dts, months_to_roll) - dts.day = get_day_of_month(&dts, day_opt) + dts.year = year_add_months(dts, months_to_roll) + dts.month = month_add_months(dts, months_to_roll) + dts.day = get_day_of_month(&dts, day_opt) - res_val = npy_datetimestruct_to_datetime(reso, &dts) + res_val = npy_datetimestruct_to_datetime(reso, &dts) # Analogous to: out[i] = res_val (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_val
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. xref #7449, #52105 This implements rollback_array for Month, Quarter, and Year offsets. I've only checked the FooBegin cases and only in non-tzaware cases: ``` dti = pd.date_range("2016-01-01", periods=60, freq="15D") mth = pd.offsets.MonthBegin(1) qtr = pd.offsets.QuarterBegin(1) yr = pd.offsets.YearBegin(1) res_mth = pd.DatetimeIndex(mth.rollback_array(dti)) res_qtr = pd.DatetimeIndex(qtr.rollback_array(dti)) res_yr = pd.DatetimeIndex(yr.rollback_array(dti)) exp_mth = pd.DatetimeIndex([mth.rollback(x) for x in dti]) exp_qtr = pd.DatetimeIndex([qtr.rollback(x) for x in dti]) exp_yr = pd.DatetimeIndex([yr.rollback(x) for x in dti]) assert res_mth.equals(exp_mth) assert res_qtr.equals(exp_qtr) assert res_yr.equals(exp_yr) ``` Posting this as a POC so someone else can take it the rest of the way. If no one does I'll circle back to it after focusing on my "do less work on the weekends" goal.
https://api.github.com/repos/pandas-dev/pandas/pulls/52205
2023-03-25T18:10:10Z
2023-06-23T16:31:23Z
null
2023-06-23T16:31:28Z
DOC: Update timestamp limitations
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 4cd98c89e7180..2c93efb128613 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -507,7 +507,8 @@ used if a custom frequency string is passed. Timestamp limitations --------------------- -Since pandas represents timestamps in nanosecond resolution, the time span that +The limits of timestamp representation depend on the chosen resolution. For +nanosecond resolution, the time span that can be represented using a 64-bit integer is limited to approximately 584 years: .. ipython:: python @@ -515,6 +516,9 @@ can be represented using a 64-bit integer is limited to approximately 584 years: pd.Timestamp.min pd.Timestamp.max +When choosing second-resolution, the available range grows to ``+/- 2.9e11 years``. +Different resolutions can be converted to each other through ``as_unit``. + .. seealso:: :ref:`timeseries.oob`
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. cc @jbrockmendel thoughts about updating this? Or should we rather start with explaining resolutions in general?
https://api.github.com/repos/pandas-dev/pandas/pulls/52204
2023-03-25T17:27:08Z
2023-03-28T09:57:15Z
2023-03-28T09:57:15Z
2023-03-29T14:02:57Z
ASV: Add benchmark when comparing datetimes with different reso
diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py index ab3b38fee1b06..4fd9740f184c8 100644 --- a/asv_bench/benchmarks/arithmetic.py +++ b/asv_bench/benchmarks/arithmetic.py @@ -266,10 +266,14 @@ def setup(self, tz): self.ts = self.s[halfway] self.s2 = Series(date_range("20010101", periods=N, freq="s", tz=tz)) + self.ts_different_reso = Timestamp("2001-01-02", tz=tz) def time_series_timestamp_compare(self, tz): self.s <= self.ts + def time_series_timestamp_different_reso_compare(self, tz): + self.s <= self.ts_different_reso + def time_timestamp_series_compare(self, tz): self.ts >= self.s
- [ ] xref #52080 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52203
2023-03-25T17:18:15Z
2023-03-27T21:06:13Z
2023-03-27T21:06:13Z
2023-03-29T14:04:10Z
ENH: Add dtype of categories to repr of CategoricalDtype
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index bac567b537edc..cbb2138ae96f2 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -38,6 +38,7 @@ Other enhancements - Improved error message when creating a DataFrame with empty data (0 rows), no index and an incorrect number of columns. (:issue:`52084`) - :meth:`DataFrame.applymap` now uses the :meth:`~api.extensions.ExtensionArray.map` method of underlying :class:`api.extensions.ExtensionArray` instances (:issue:`52219`) - :meth:`arrays.SparseArray.map` now supports ``na_action`` (:issue:`52096`). +- Add dtype of categories to ``repr`` information of :class:`CategoricalDtype` (:issue:`52179`) .. --------------------------------------------------------------------------- .. _whatsnew_210.notable_bug_fixes: diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index d302085275757..26a23f59d7dc6 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -253,11 +253,11 @@ def _from_values_or_dtype( Examples -------- >>> pd.CategoricalDtype._from_values_or_dtype() - CategoricalDtype(categories=None, ordered=None) + CategoricalDtype(categories=None, ordered=None, categories_dtype=None) >>> pd.CategoricalDtype._from_values_or_dtype( ... categories=['a', 'b'], ordered=True ... 
) - CategoricalDtype(categories=['a', 'b'], ordered=True) + CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object) >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True) >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False) >>> c = pd.Categorical([0, 1], dtype=dtype1, fastpath=True) @@ -272,7 +272,7 @@ def _from_values_or_dtype( The supplied dtype takes precedence over values' dtype: >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2) - CategoricalDtype(categories=['x', 'y'], ordered=False) + CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object) """ if dtype is not None: @@ -429,13 +429,19 @@ def __eq__(self, other: Any) -> bool: def __repr__(self) -> str_type: if self.categories is None: data = "None" + dtype = "None" else: data = self.categories._format_data(name=type(self).__name__) if data is None: # self.categories is RangeIndex data = str(self.categories._range) data = data.rstrip(", ") - return f"CategoricalDtype(categories={data}, ordered={self.ordered})" + dtype = self.categories.dtype + + return ( + f"CategoricalDtype(categories={data}, ordered={self.ordered}, " + f"categories_dtype={dtype})" + ) @cache_readonly def _hash_categories(self) -> int: diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 35ea4dc911fa8..41a969839c9bd 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -181,7 +181,7 @@ def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype: ... "ordered": True, ... } ... 
) - CategoricalDtype(categories=['a', 'b', 'c'], ordered=True) + CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=object) >>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"}) 'datetime64[ns]' diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index e862a6985160b..768a1551a8d58 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -211,7 +211,10 @@ def test_repr_range_categories(self): dtype = CategoricalDtype(categories=rng, ordered=False) result = repr(dtype) - expected = "CategoricalDtype(categories=range(0, 3), ordered=False)" + expected = ( + "CategoricalDtype(categories=range(0, 3), ordered=False, " + "categories_dtype=int64)" + ) assert result == expected def test_update_dtype(self): @@ -220,6 +223,15 @@ def test_update_dtype(self): expected = CategoricalDtype(["b"], ordered=True) assert result == expected + def test_repr(self): + cat = Categorical(pd.Index([1, 2, 3], dtype="int32")) + result = cat.dtype.__repr__() + expected = ( + "CategoricalDtype(categories=[1, 2, 3], ordered=False, " + "categories_dtype=int32)" + ) + assert result == expected + class TestDatetimeTZDtype(Base): @pytest.fixture @@ -980,7 +992,10 @@ def test_str_vs_repr(self, ordered): c1 = CategoricalDtype(["a", "b"], ordered=ordered) assert str(c1) == "category" # Py2 will have unicode prefixes - pat = r"CategoricalDtype\(categories=\[.*\], ordered={ordered}\)" + pat = ( + r"CategoricalDtype\(categories=\[.*\], ordered={ordered}, " + r"categories_dtype=object\)" + ) assert re.match(pat.format(ordered=ordered), repr(c1)) def test_categorical_categories(self): diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py index 250bee02e06f4..4056072e71d09 100644 --- a/pandas/tests/util/test_assert_index_equal.py +++ b/pandas/tests/util/test_assert_index_equal.py @@ -209,9 +209,10 @@ def 
test_index_equal_category_mismatch(check_categorical): msg = """Index are different Attribute "dtype" are different -\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False\\) +\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False, \ +categories_dtype=object\\) \\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \ -ordered=False\\)""" +ordered=False, categories_dtype=object\\)""" idx1 = Index(Categorical(["a", "b"])) idx2 = Index(Categorical(["a", "b"], categories=["a", "b", "c"])) diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index 835f710842cc0..dd28773f08cc4 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -250,9 +250,10 @@ def test_series_equal_categorical_mismatch(check_categorical): msg = """Attributes of Series are different Attribute "dtype" are different -\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False\\) +\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False, \ +categories_dtype=object\\) \\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \ -ordered=False\\)""" +ordered=False, categories_dtype=object\\)""" s1 = Series(Categorical(["a", "b"])) s2 = Series(Categorical(["a", "b"], categories=list("abc")))
- [x] closes #52179 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52202
2023-03-25T17:12:43Z
2023-03-29T21:39:48Z
2023-03-29T21:39:48Z
2023-03-30T16:56:02Z
ENH: add `__from_pyarrow__` support to `DatetimeTZDtype`
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index c3355757350b9..1e1c9517d5ef7 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -85,6 +85,7 @@ Other enhancements - Add dtype of categories to ``repr`` information of :class:`CategoricalDtype` (:issue:`52179`) - Added to the escape mode "latex-math" preserving without escaping all characters between "\(" and "\)" in formatter (:issue:`51903`) - Adding ``engine_kwargs`` parameter to :meth:`DataFrame.read_excel` (:issue:`52214`) +- Implemented ``__from_arrow__`` on :class:`DatetimeTZDtype`. (:issue:`52201`) - Implemented ``__pandas_priority__`` to allow custom types to take precedence over :class:`DataFrame`, :class:`Series`, :class:`Index`, or :class:`ExtensionArray` for arithmetic operations, :ref:`see the developer guide <extending.pandas_priority>` (:issue:`48347`) - Improve error message when having incompatible columns using :meth:`DataFrame.merge` (:issue:`51861`) - Improve error message when setting :class:`DataFrame` with wrong number of columns through :meth:`DataFrame.isetitem` (:issue:`51701`) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 12245a144ec2a..5f655a89abf99 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2344,7 +2344,9 @@ def _validate_dt64_dtype(dtype): # a tz-aware Timestamp (with a tz specific to its datetime) will # be incorrect(ish?) 
for the array as a whole dtype = cast(DatetimeTZDtype, dtype) - dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz)) + dtype = DatetimeTZDtype( + unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz) + ) return dtype diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 4d336f1edbb2d..65b612ce019dd 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -817,6 +817,40 @@ def __eq__(self, other: Any) -> bool: and tz_compare(self.tz, other.tz) ) + def __from_arrow__( + self, array: pyarrow.Array | pyarrow.ChunkedArray + ) -> DatetimeArray: + """ + Construct DatetimeArray from pyarrow Array/ChunkedArray. + + Note: If the units in the pyarrow Array are the same as this + DatetimeDtype, then values corresponding to the integer representation + of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`) + are converted to ``NaT``, regardless of the null indicator in the + pyarrow array. + + Parameters + ---------- + array : pyarrow.Array or pyarrow.ChunkedArray + The Arrow array to convert to DatetimeArray. + + Returns + ------- + extension array : DatetimeArray + """ + import pyarrow + + from pandas.core.arrays import DatetimeArray + + array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True) + + if isinstance(array, pyarrow.Array): + np_arr = array.to_numpy(zero_copy_only=False) + else: + np_arr = array.to_numpy() + + return DatetimeArray(np_arr, dtype=self, copy=False) + def __setstate__(self, state) -> None: # for pickle compat. 
__get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to diff --git a/pandas/tests/arrays/datetimes/test_constructors.py b/pandas/tests/arrays/datetimes/test_constructors.py index bbc66dcd328c3..30f47e37fedf5 100644 --- a/pandas/tests/arrays/datetimes/test_constructors.py +++ b/pandas/tests/arrays/datetimes/test_constructors.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from pandas._libs import iNaT + from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd @@ -168,3 +170,87 @@ def test_2d(self, order): res = DatetimeArray._from_sequence(arr) expected = DatetimeArray._from_sequence(arr.ravel()).reshape(arr.shape) tm.assert_datetime_array_equal(res, expected) + + +# ---------------------------------------------------------------------------- +# Arrow interaction + + +EXTREME_VALUES = [0, 123456789, None, iNaT, 2**63 - 1, -(2**63) + 1] +FINE_TO_COARSE_SAFE = [123_000_000_000, None, -123_000_000_000] +COARSE_TO_FINE_SAFE = [123, None, -123] + + +@pytest.mark.parametrize( + ("pa_unit", "pd_unit", "pa_tz", "pd_tz", "data"), + [ + ("s", "s", "UTC", "UTC", EXTREME_VALUES), + ("ms", "ms", "UTC", "Europe/Berlin", EXTREME_VALUES), + ("us", "us", "US/Eastern", "UTC", EXTREME_VALUES), + ("ns", "ns", "US/Central", "Asia/Kolkata", EXTREME_VALUES), + ("ns", "s", "UTC", "UTC", FINE_TO_COARSE_SAFE), + ("us", "ms", "UTC", "Europe/Berlin", FINE_TO_COARSE_SAFE), + ("ms", "us", "US/Eastern", "UTC", COARSE_TO_FINE_SAFE), + ("s", "ns", "US/Central", "Asia/Kolkata", COARSE_TO_FINE_SAFE), + ], +) +def test_from_arrowtest_from_arrow_with_different_units_and_timezones_with_( + pa_unit, pd_unit, pa_tz, pd_tz, data +): + pa = pytest.importorskip("pyarrow") + + pa_type = pa.timestamp(pa_unit, tz=pa_tz) + arr = pa.array(data, type=pa_type) + dtype = DatetimeTZDtype(unit=pd_unit, tz=pd_tz) + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray( + np.array(data, 
dtype=f"datetime64[{pa_unit}]").astype(f"datetime64[{pd_unit}]"), + dtype=dtype, + ) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + ("unit", "tz"), + [ + ("s", "UTC"), + ("ms", "Europe/Berlin"), + ("us", "US/Eastern"), + ("ns", "Asia/Kolkata"), + ("ns", "UTC"), + ], +) +def test_from_arrow_from_empty(unit, tz): + pa = pytest.importorskip("pyarrow") + + data = [] + arr = pa.array(data) + dtype = DatetimeTZDtype(unit=unit, tz=tz) + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray(np.array(data, dtype=f"datetime64[{unit}]")) + expected = expected.tz_localize(tz=tz) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) + + +def test_from_arrow_from_integers(): + pa = pytest.importorskip("pyarrow") + + data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789] + arr = pa.array(data) + dtype = DatetimeTZDtype(unit="ns", tz="UTC") + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray(np.array(data, dtype="datetime64[ns]")) + expected = expected.tz_localize("UTC") + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected)
- [x] closes #xxxx (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Closes #52200
https://api.github.com/repos/pandas-dev/pandas/pulls/52201
2023-03-25T14:58:42Z
2023-04-17T16:41:18Z
2023-04-17T16:41:18Z
2023-04-17T17:02:10Z
add extra test case in the test_constructor_str_infer_reso
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index ca0796e55f28d..7e4002dc3a0cf 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -58,6 +58,12 @@ def test_constructor_str_infer_reso(self): ts = Timestamp("2016 June 3 15:25:01.345") assert ts.unit == "ms" + ts = Timestamp("300-01-01") + assert ts.unit == "s" + + ts = Timestamp("300 June 1:30:01.300") + assert ts.unit == "ms" + def test_constructor_from_iso8601_str_with_offset_reso(self): # GH#49737 ts = Timestamp("2016-01-01 04:05:06-01:00")
- [x] closes #51025 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). An extra test case is added to `test_constructor_str_infer_reso` to check `Timestamp('300-01-01')`. I suggest doing a parametrization for this test.
https://api.github.com/repos/pandas-dev/pandas/pulls/52199
2023-03-25T14:21:17Z
2023-03-25T19:40:58Z
2023-03-25T19:40:58Z
2023-03-25T19:40:58Z
DOC warn user about potential information loss in Resampler.interpolate
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index e8864deaaca4d..8cc578b7fd0b6 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -825,7 +825,6 @@ def fillna(self, method, limit: int | None = None): """ return self._upsample(method, limit=limit) - @doc(NDFrame.interpolate, **_shared_docs_kwargs) def interpolate( self, method: QuantileInterpolation = "linear", @@ -839,7 +838,160 @@ def interpolate( **kwargs, ): """ - Interpolate values according to different methods. + Interpolate values between target timestamps according to different methods. + + The original index is first reindexed to target timestamps + (see :meth:`core.resample.Resampler.asfreq`), + then the interpolation of ``NaN`` values via :meth`DataFrame.interpolate` + happens. + + Parameters + ---------- + method : str, default 'linear' + Interpolation technique to use. One of: + + * 'linear': Ignore the index and treat the values as equally + spaced. This is the only method supported on MultiIndexes. + * 'time': Works on daily and higher resolution data to interpolate + given length of interval. + * 'index', 'values': use the actual numerical values of the index. + * 'pad': Fill in NaNs using existing values. + * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', + 'barycentric', 'polynomial': Passed to + `scipy.interpolate.interp1d`, whereas 'spline' is passed to + `scipy.interpolate.UnivariateSpline`. These methods use the numerical + values of the index. Both 'polynomial' and 'spline' require that + you also specify an `order` (int), e.g. + ``df.interpolate(method='polynomial', order=5)``. Note that, + `slinear` method in Pandas refers to the Scipy first order `spline` + instead of Pandas first order `spline`. + * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', + 'cubicspline': Wrappers around the SciPy interpolation methods of + similar names. See `Notes`. 
+ * 'from_derivatives': Refers to + `scipy.interpolate.BPoly.from_derivatives` which + replaces 'piecewise_polynomial' interpolation method in + scipy 0.18. + + axis : {{0 or 'index', 1 or 'columns', None}}, default None + Axis to interpolate along. For `Series` this parameter is unused + and defaults to 0. + limit : int, optional + Maximum number of consecutive NaNs to fill. Must be greater than + 0. + inplace : bool, default False + Update the data in place if possible. + limit_direction : {{'forward', 'backward', 'both'}}, Optional + Consecutive NaNs will be filled in this direction. + + If limit is specified: + * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. + * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be + 'backwards'. + + If 'limit' is not specified: + * If 'method' is 'backfill' or 'bfill', the default is 'backward' + * else the default is 'forward' + + .. versionchanged:: 1.1.0 + raises ValueError if `limit_direction` is 'forward' or 'both' and + method is 'backfill' or 'bfill'. + raises ValueError if `limit_direction` is 'backward' or 'both' and + method is 'pad' or 'ffill'. + + limit_area : {{`None`, 'inside', 'outside'}}, default None + If limit is specified, consecutive NaNs will be filled with this + restriction. + + * ``None``: No fill restriction. + * 'inside': Only fill NaNs surrounded by valid values + (interpolate). + * 'outside': Only fill NaNs outside valid values (extrapolate). + + downcast : optional, 'infer' or None, defaults to None + Downcast dtypes if possible. + ``**kwargs`` : optional + Keyword arguments to pass on to the interpolating function. + + Returns + ------- + DataFrame or Series + Interpolated values at the specified freq. + + See Also + -------- + core.resample.Resampler.asfreq: Return the values at the new freq, + essentially a reindex. + DataFrame.interpolate: Fill NaN values using an interpolation method. 
+ + Notes + ----- + For high-frequent or non-equidistant time-series with timestamps + the reindexing followed by interpolation may lead to information loss + as shown in the last example. + + Examples + -------- + + >>> import datetime as dt + >>> timesteps = [ + ... dt.datetime(2023, 3, 1, 7, 0, 0), + ... dt.datetime(2023, 3, 1, 7, 0, 1), + ... dt.datetime(2023, 3, 1, 7, 0, 2), + ... dt.datetime(2023, 3, 1, 7, 0, 3), + ... dt.datetime(2023, 3, 1, 7, 0, 4)] + >>> series = pd.Series(data=[1, -1, 2, 1, 3], index=timesteps) + >>> series + 2023-03-01 07:00:00 1 + 2023-03-01 07:00:01 -1 + 2023-03-01 07:00:02 2 + 2023-03-01 07:00:03 1 + 2023-03-01 07:00:04 3 + dtype: int64 + + Upsample the dataframe to 0.5Hz by providing the period time of 2s. + + >>> series.resample("2s").interpolate("linear") + 2023-03-01 07:00:00 1 + 2023-03-01 07:00:02 2 + 2023-03-01 07:00:04 3 + Freq: 2S, dtype: int64 + + Downsample the dataframe to 2Hz by providing the period time of 500ms. + + >>> series.resample("500ms").interpolate("linear") + 2023-03-01 07:00:00.000 1.0 + 2023-03-01 07:00:00.500 0.0 + 2023-03-01 07:00:01.000 -1.0 + 2023-03-01 07:00:01.500 0.5 + 2023-03-01 07:00:02.000 2.0 + 2023-03-01 07:00:02.500 1.5 + 2023-03-01 07:00:03.000 1.0 + 2023-03-01 07:00:03.500 2.0 + 2023-03-01 07:00:04.000 3.0 + Freq: 500L, dtype: float64 + + Internal reindexing with ``as_freq()`` prior to interpolation leads to + an interpolated timeseries on the basis the reindexed timestamps (anchors). 
+ Since not all datapoints from original series become anchors, + it can lead to misleading interpolation results as in the following example: + + >>> series.resample("400ms").interpolate("linear") + 2023-03-01 07:00:00.000 1.0 + 2023-03-01 07:00:00.400 1.2 + 2023-03-01 07:00:00.800 1.4 + 2023-03-01 07:00:01.200 1.6 + 2023-03-01 07:00:01.600 1.8 + 2023-03-01 07:00:02.000 2.0 + 2023-03-01 07:00:02.400 2.2 + 2023-03-01 07:00:02.800 2.4 + 2023-03-01 07:00:03.200 2.6 + 2023-03-01 07:00:03.600 2.8 + 2023-03-01 07:00:04.000 3.0 + Freq: 400L, dtype: float64 + + Note that the series erroneously increases between two anchors + ``07:00:00`` and ``07:00:02``. """ result = self._upsample("asfreq") return result.interpolate(
In scientific and technical domain people deal with high-frequent or non-equidistant timeseries. Using `resample("1s").interpolate()` can have unwanted side effects which we should warn in the documentation: ```python import datetime as dt import pandas as pd import matplotlib.pyplot as plt timesteps = [ dt.datetime(2023, 3, 1, 7, 0, 0), dt.datetime(2023, 3, 1, 7, 0, 1), dt.datetime(2023, 3, 1, 7, 0, 2), dt.datetime(2023, 3, 1, 7, 0, 3), dt.datetime(2023, 3, 1, 7, 0, 4) ] series = pd.Series(data=[1, -1, 2, 1, 3], index=timesteps) resample_freq = "400ms" series_resampler = series.resample(resample_freq) series_resampled_linear = series_resampler.interpolate("linear") ``` which leads to the following: ![result](https://user-images.githubusercontent.com/12762439/227719356-b3974634-ae19-40cc-8e0a-7ad668da76c4.png) For that example, information loss is expected. Imho we should warn the user about this behavior. It seems to be known, to quote @jreback from [here](https://github.com/pandas-dev/pandas/issues/14297#issuecomment-285434729): > so the reason this happens is because the index is first reindexed to the new time buckets (upsampled) via reindexing, then interpolation happens. - [x] refers to #12552, #14297 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52198
2023-03-25T13:18:00Z
2023-04-07T17:46:33Z
2023-04-07T17:46:33Z
2023-11-01T09:05:46Z
WARN: Only warn about inconsistent parsing if there are multiple non-null elements
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 0265b4404d6ab..3cd3dec185ccf 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -139,13 +139,16 @@ def _guess_datetime_format_for_array(arr, dayfirst: bool | None = False) -> str ) if guessed_format is not None: return guessed_format - warnings.warn( - "Could not infer format, so each element will be parsed " - "individually, falling back to `dateutil`. To ensure parsing is " - "consistent and as-expected, please specify a format.", - UserWarning, - stacklevel=find_stack_level(), - ) + # If there are multiple non-null elements, warn about + # how parsing might not be consistent + if tslib.first_non_null(arr[first_non_null + 1 :]) != -1: + warnings.warn( + "Could not infer format, so each element will be parsed " + "individually, falling back to `dateutil`. To ensure parsing is " + "consistent and as-expected, please specify a format.", + UserWarning, + stacklevel=find_stack_level(), + ) return None diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index f3c49471b5bb2..8c3474220cde8 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -1252,13 +1252,15 @@ def test_bad_date_parse(all_parsers, cache_dates, value): parser = all_parsers s = StringIO((f"{value},\n") * 50000) - if parser.engine == "pyarrow": + if parser.engine == "pyarrow" and not cache_dates: # None in input gets converted to 'None', for which # pandas tries to guess the datetime format, triggering # the warning. TODO: parse dates directly in pyarrow, see # https://github.com/pandas-dev/pandas/issues/48017 warn = UserWarning else: + # Note: warning is not raised if 'cache_dates', because here there is only a + # single unique date and hence no risk of inconsistent parsing. 
warn = None parser.read_csv_check_warnings( warn, @@ -1285,6 +1287,10 @@ def test_bad_date_parse_with_warning(all_parsers, cache_dates, value): # TODO: parse dates directly in pyarrow, see # https://github.com/pandas-dev/pandas/issues/48017 warn = None + elif cache_dates: + # Note: warning is not raised if 'cache_dates', because here there is only a + # single unique date and hence no risk of inconsistent parsing. + warn = None else: warn = UserWarning parser.read_csv_check_warnings( @@ -1737,9 +1743,7 @@ def test_parse_timezone(all_parsers): def test_invalid_parse_delimited_date(all_parsers, date_string): parser = all_parsers expected = DataFrame({0: [date_string]}, dtype="object") - result = parser.read_csv_check_warnings( - UserWarning, - "Could not infer format", + result = parser.read_csv( StringIO(date_string), header=None, parse_dates=[0], @@ -2063,9 +2067,7 @@ def test_infer_first_column_as_index(all_parsers): # GH#11019 parser = all_parsers data = "a,b,c\n1970-01-01,2,3,4" - result = parser.read_csv_check_warnings( - UserWarning, - "Could not infer format", + result = parser.read_csv( StringIO(data), parse_dates=["a"], ) diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py index 4823df1da9959..f818d621c744f 100644 --- a/pandas/tests/io/parser/usecols/test_parse_dates.py +++ b/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -124,9 +124,7 @@ def test_usecols_with_parse_dates4(all_parsers): } expected = DataFrame(cols, columns=["a_b"] + list("cdefghij")) - result = parser.read_csv_check_warnings( - UserWarning, - "Could not infer format", + result = parser.read_csv( StringIO(data), usecols=usecols, parse_dates=parse_dates, diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 07529fcbb49b7..ae5543ff266ef 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1231,8 +1231,7 @@ def test_value_counts_datetime_outofbounds(self): 
tm.assert_series_equal(res, exp) # GH 12424 - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore") + res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore") exp = Series(["2362-01-01", np.nan], dtype=object) tm.assert_series_equal(res, exp) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 6879f4dcbaa09..5962d52edae3e 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -271,8 +271,7 @@ def test_to_datetime_with_NA(self, data, format, expected): def test_to_datetime_with_NA_with_warning(self): # GH#42957 - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result = to_datetime(["201010", pd.NA]) + result = to_datetime(["201010", pd.NA]) expected = DatetimeIndex(["2010-10-20", "NaT"]) tm.assert_index_equal(result, expected) @@ -946,8 +945,7 @@ def test_to_datetime_YYYYMMDD(self): def test_to_datetime_unparsable_ignore(self): # unparsable ser = "Month 1, 1999" - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - assert to_datetime(ser, errors="ignore") == ser + assert to_datetime(ser, errors="ignore") == ser @td.skip_if_windows # `tm.set_timezone` does not work in windows def test_to_datetime_now(self): @@ -1344,17 +1342,13 @@ def test_invalid_format_raises(self, errors): to_datetime(["00:00:00"], format="H%:M%:S%", errors=errors) @pytest.mark.parametrize("value", ["a", "00:01:99"]) - @pytest.mark.parametrize( - "format,warning", [(None, UserWarning), ("%H:%M:%S", None)] - ) - def test_datetime_invalid_scalar(self, value, format, warning): + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_invalid_scalar(self, value, format): # GH24763 - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="ignore", format=format) + res = 
to_datetime(value, errors="ignore", format=format) assert res == value - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="coerce", format=format) + res = to_datetime(value, errors="coerce", format=format) assert res is NaT msg = "|".join( @@ -1368,21 +1362,16 @@ def test_datetime_invalid_scalar(self, value, format, warning): ] ) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(warning, match="Could not infer format"): - to_datetime(value, errors="raise", format=format) + to_datetime(value, errors="raise", format=format) @pytest.mark.parametrize("value", ["3000/12/11 00:00:00"]) - @pytest.mark.parametrize( - "format,warning", [(None, UserWarning), ("%H:%M:%S", None)] - ) - def test_datetime_outofbounds_scalar(self, value, format, warning): + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_outofbounds_scalar(self, value, format): # GH24763 - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="ignore", format=format) + res = to_datetime(value, errors="ignore", format=format) assert res == value - with tm.assert_produces_warning(warning, match="Could not infer format"): - res = to_datetime(value, errors="coerce", format=format) + res = to_datetime(value, errors="coerce", format=format) assert res is NaT if format is not None: @@ -1391,22 +1380,26 @@ def test_datetime_outofbounds_scalar(self, value, format, warning): to_datetime(value, errors="raise", format=format) else: msg = "^Out of bounds .*, at position 0$" - with pytest.raises( - OutOfBoundsDatetime, match=msg - ), tm.assert_produces_warning(warning, match="Could not infer format"): + with pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(value, errors="raise", format=format) - @pytest.mark.parametrize("values", [["a"], ["00:01:99"], ["a", "b", "99:00:00"]]) @pytest.mark.parametrize( - "format,warning", [(None, UserWarning), 
("%H:%M:%S", None)] + ("values"), [(["a"]), (["00:01:99"]), (["a", "b", "99:00:00"])] ) - def test_datetime_invalid_index(self, values, format, warning): + @pytest.mark.parametrize("format", [(None), ("%H:%M:%S")]) + def test_datetime_invalid_index(self, values, format): # GH24763 - with tm.assert_produces_warning(warning, match="Could not infer format"): + # Not great to have logic in tests, but this one's hard to + # parametrise over + if format is None and len(values) > 1: + warn = UserWarning + else: + warn = None + with tm.assert_produces_warning(warn, match="Could not infer format"): res = to_datetime(values, errors="ignore", format=format) tm.assert_index_equal(res, Index(values)) - with tm.assert_produces_warning(warning, match="Could not infer format"): + with tm.assert_produces_warning(warn, match="Could not infer format"): res = to_datetime(values, errors="coerce", format=format) tm.assert_index_equal(res, DatetimeIndex([NaT] * len(values))) @@ -1421,7 +1414,7 @@ def test_datetime_invalid_index(self, values, format, warning): ] ) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(warning, match="Could not infer format"): + with tm.assert_produces_warning(warn, match="Could not infer format"): to_datetime(values, errors="raise", format=format) @pytest.mark.parametrize("utc", [True, None]) @@ -2220,10 +2213,7 @@ def test_to_datetime_barely_out_of_bounds(self): msg = "^Out of bounds nanosecond timestamp: .*, at position 0" with pytest.raises(OutOfBoundsDatetime, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(arr) + to_datetime(arr) @pytest.mark.parametrize( "arg, exp_str", @@ -2537,10 +2527,7 @@ def test_string_invalid_operation(self, cache): # GH #51084 with pytest.raises(ValueError, match="Unknown datetime string format"): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(invalid, errors="raise", cache=cache) + 
to_datetime(invalid, errors="raise", cache=cache) def test_string_na_nat_conversion(self, cache): # GH #999, #858 @@ -2567,22 +2554,15 @@ def test_string_na_nat_conversion_malformed(self, cache): # GH 10636, default is now 'raise' msg = r"Unknown datetime string format" with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(malformed, errors="raise", cache=cache) + to_datetime(malformed, errors="raise", cache=cache) - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result = to_datetime(malformed, errors="ignore", cache=cache) + result = to_datetime(malformed, errors="ignore", cache=cache) # GH 21864 expected = Index(malformed) tm.assert_index_equal(result, expected) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(malformed, errors="raise", cache=cache) + to_datetime(malformed, errors="raise", cache=cache) def test_string_na_nat_conversion_with_name(self, cache): idx = ["a", "b", "c", "d", "e"] @@ -2811,14 +2791,13 @@ def test_to_datetime_series_start_with_nans(self, cache): tm.assert_series_equal(result, expected) @pytest.mark.parametrize( - "tz_name, offset, warning", - [("UTC", 0, None), ("UTC-3", 180, UserWarning), ("UTC+3", -180, UserWarning)], + "tz_name, offset", + [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)], ) - def test_infer_datetime_format_tz_name(self, tz_name, offset, warning): + def test_infer_datetime_format_tz_name(self, tz_name, offset): # GH 33133 ser = Series([f"2019-02-02 08:07:13 {tz_name}"]) - with tm.assert_produces_warning(warning, match="Could not infer format"): - result = to_datetime(ser) + result = to_datetime(ser) tz = timezone(timedelta(minutes=offset)) expected = Series([Timestamp("2019-02-02 08:07:13").tz_localize(tz)]) tm.assert_series_equal(result, expected) @@ -2866,25 +2845,21 @@ class TestDaysInMonth: # tests for 
issue #10154 @pytest.mark.parametrize( - "arg, format, warning", + "arg, format", [ - ["2015-02-29", None, UserWarning], - ["2015-02-29", "%Y-%m-%d", None], - ["2015-02-32", "%Y-%m-%d", None], - ["2015-04-31", "%Y-%m-%d", None], + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-32", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], ], ) - def test_day_not_in_month_coerce(self, cache, arg, format, warning): - with tm.assert_produces_warning(warning, match="Could not infer format"): - assert isna(to_datetime(arg, errors="coerce", format=format, cache=cache)) + def test_day_not_in_month_coerce(self, cache, arg, format): + assert isna(to_datetime(arg, errors="coerce", format=format, cache=cache)) def test_day_not_in_month_raise(self, cache): msg = "day is out of range for month: 2015-02-29, at position 0" with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime("2015-02-29", errors="raise", cache=cache) + to_datetime("2015-02-29", errors="raise", cache=cache) @pytest.mark.parametrize( "arg, format, msg", @@ -2929,72 +2904,71 @@ def test_day_not_in_month_raise_value(self, cache, arg, format, msg): to_datetime(arg, errors="raise", format=format, cache=cache) @pytest.mark.parametrize( - "expected, format, warning", + "expected, format", [ - ["2015-02-29", None, UserWarning], - ["2015-02-29", "%Y-%m-%d", None], - ["2015-02-29", "%Y-%m-%d", None], - ["2015-04-31", "%Y-%m-%d", None], + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-29", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], ], ) - def test_day_not_in_month_ignore(self, cache, expected, format, warning): - with tm.assert_produces_warning(warning, match="Could not infer format"): - result = to_datetime(expected, errors="ignore", format=format, cache=cache) + def test_day_not_in_month_ignore(self, cache, expected, format): + result = to_datetime(expected, errors="ignore", format=format, cache=cache) assert result 
== expected class TestDatetimeParsingWrappers: @pytest.mark.parametrize( - "date_str, expected, warning", + "date_str, expected", [ - ("2011-01-01", datetime(2011, 1, 1), None), - ("2Q2005", datetime(2005, 4, 1), UserWarning), - ("2Q05", datetime(2005, 4, 1), UserWarning), - ("2005Q1", datetime(2005, 1, 1), UserWarning), - ("05Q1", datetime(2005, 1, 1), UserWarning), - ("2011Q3", datetime(2011, 7, 1), UserWarning), - ("11Q3", datetime(2011, 7, 1), UserWarning), - ("3Q2011", datetime(2011, 7, 1), UserWarning), - ("3Q11", datetime(2011, 7, 1), UserWarning), + ("2011-01-01", datetime(2011, 1, 1)), + ("2Q2005", datetime(2005, 4, 1)), + ("2Q05", datetime(2005, 4, 1)), + ("2005Q1", datetime(2005, 1, 1)), + ("05Q1", datetime(2005, 1, 1)), + ("2011Q3", datetime(2011, 7, 1)), + ("11Q3", datetime(2011, 7, 1)), + ("3Q2011", datetime(2011, 7, 1)), + ("3Q11", datetime(2011, 7, 1)), # quarterly without space - ("2000Q4", datetime(2000, 10, 1), UserWarning), - ("00Q4", datetime(2000, 10, 1), UserWarning), - ("4Q2000", datetime(2000, 10, 1), UserWarning), - ("4Q00", datetime(2000, 10, 1), UserWarning), - ("2000q4", datetime(2000, 10, 1), UserWarning), - ("2000-Q4", datetime(2000, 10, 1), UserWarning), - ("00-Q4", datetime(2000, 10, 1), UserWarning), - ("4Q-2000", datetime(2000, 10, 1), UserWarning), - ("4Q-00", datetime(2000, 10, 1), UserWarning), - ("00q4", datetime(2000, 10, 1), UserWarning), - ("2005", datetime(2005, 1, 1), None), - ("2005-11", datetime(2005, 11, 1), None), - ("2005 11", datetime(2005, 11, 1), UserWarning), - ("11-2005", datetime(2005, 11, 1), UserWarning), - ("11 2005", datetime(2005, 11, 1), UserWarning), - ("200511", datetime(2020, 5, 11), UserWarning), - ("20051109", datetime(2005, 11, 9), None), - ("20051109 10:15", datetime(2005, 11, 9, 10, 15), None), - ("20051109 08H", datetime(2005, 11, 9, 8, 0), None), - ("2005-11-09 10:15", datetime(2005, 11, 9, 10, 15), None), - ("2005-11-09 08H", datetime(2005, 11, 9, 8, 0), None), - ("2005/11/09 10:15", 
datetime(2005, 11, 9, 10, 15), None), - ("2005/11/09 08H", datetime(2005, 11, 9, 8, 0), None), - ("Thu Sep 25 10:36:28 2003", datetime(2003, 9, 25, 10, 36, 28), None), - ("Thu Sep 25 2003", datetime(2003, 9, 25), None), - ("Sep 25 2003", datetime(2003, 9, 25), None), - ("January 1 2014", datetime(2014, 1, 1), None), + ("2000Q4", datetime(2000, 10, 1)), + ("00Q4", datetime(2000, 10, 1)), + ("4Q2000", datetime(2000, 10, 1)), + ("4Q00", datetime(2000, 10, 1)), + ("2000q4", datetime(2000, 10, 1)), + ("2000-Q4", datetime(2000, 10, 1)), + ("00-Q4", datetime(2000, 10, 1)), + ("4Q-2000", datetime(2000, 10, 1)), + ("4Q-00", datetime(2000, 10, 1)), + ("00q4", datetime(2000, 10, 1)), + ("2005", datetime(2005, 1, 1)), + ("2005-11", datetime(2005, 11, 1)), + ("2005 11", datetime(2005, 11, 1)), + ("11-2005", datetime(2005, 11, 1)), + ("11 2005", datetime(2005, 11, 1)), + ("200511", datetime(2020, 5, 11)), + ("20051109", datetime(2005, 11, 9)), + ("20051109 10:15", datetime(2005, 11, 9, 10, 15)), + ("20051109 08H", datetime(2005, 11, 9, 8, 0)), + ("2005-11-09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005-11-09 08H", datetime(2005, 11, 9, 8, 0)), + ("2005/11/09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005/11/09 08H", datetime(2005, 11, 9, 8, 0)), + ("Thu Sep 25 10:36:28 2003", datetime(2003, 9, 25, 10, 36, 28)), + ("Thu Sep 25 2003", datetime(2003, 9, 25)), + ("Sep 25 2003", datetime(2003, 9, 25)), + ("January 1 2014", datetime(2014, 1, 1)), # GHE10537 - ("2014-06", datetime(2014, 6, 1), None), - ("06-2014", datetime(2014, 6, 1), UserWarning), - ("2014-6", datetime(2014, 6, 1), None), - ("6-2014", datetime(2014, 6, 1), UserWarning), - ("20010101 12", datetime(2001, 1, 1, 12), None), - ("20010101 1234", datetime(2001, 1, 1, 12, 34), None), - ("20010101 123456", datetime(2001, 1, 1, 12, 34, 56), None), + ("2014-06", datetime(2014, 6, 1)), + ("06-2014", datetime(2014, 6, 1)), + ("2014-6", datetime(2014, 6, 1)), + ("6-2014", datetime(2014, 6, 1)), + ("20010101 12", 
datetime(2001, 1, 1, 12)), + ("20010101 1234", datetime(2001, 1, 1, 12, 34)), + ("20010101 123456", datetime(2001, 1, 1, 12, 34, 56)), ], ) - def test_parsers(self, date_str, expected, warning, cache): + def test_parsers(self, date_str, expected, cache): # dateutil >= 2.5.0 defaults to yearfirst=True # https://github.com/dateutil/dateutil/issues/217 yearfirst = True @@ -3002,13 +2976,12 @@ def test_parsers(self, date_str, expected, warning, cache): result1, _ = parsing.parse_datetime_string_with_reso( date_str, yearfirst=yearfirst ) - with tm.assert_produces_warning(warning, match="Could not infer format"): - result2 = to_datetime(date_str, yearfirst=yearfirst) - result3 = to_datetime([date_str], yearfirst=yearfirst) - # result5 is used below - result4 = to_datetime( - np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache - ) + result2 = to_datetime(date_str, yearfirst=yearfirst) + result3 = to_datetime([date_str], yearfirst=yearfirst) + # result5 is used below + result4 = to_datetime( + np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache + ) result6 = DatetimeIndex([date_str], yearfirst=yearfirst) # result7 is used below result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst) @@ -3117,10 +3090,9 @@ def test_parsers_dayfirst_yearfirst( result2 = Timestamp(date_str) assert result2 == expected - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result3 = to_datetime( - date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache - ) + result3 = to_datetime( + date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache + ) result4 = DatetimeIndex([date_str], dayfirst=dayfirst, yearfirst=yearfirst)[0] @@ -3137,9 +3109,8 @@ def test_parsers_timestring(self, date_str, exp_def): exp_now = parse(date_str) result1, _ = parsing.parse_datetime_string_with_reso(date_str) - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - result2 = to_datetime(date_str) - result3 = 
to_datetime([date_str]) + result2 = to_datetime(date_str) + result3 = to_datetime([date_str]) result4 = Timestamp(date_str) result5 = DatetimeIndex([date_str])[0] # parse time string return time string based on default date @@ -3316,10 +3287,7 @@ def test_incorrect_value_exception(self): "Unknown datetime string format, unable to parse: yesterday, at position 1" ) with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - UserWarning, match="Could not infer format" - ): - to_datetime(["today", "yesterday"]) + to_datetime(["today", "yesterday"]) @pytest.mark.parametrize( "format, warning", @@ -3333,8 +3301,7 @@ def test_to_datetime_out_of_bounds_with_format_arg(self, format, warning): # see gh-23830 msg = r"^Out of bounds nanosecond timestamp: 2417-10-10 00:00:00, at position 0" with pytest.raises(OutOfBoundsDatetime, match=msg): - with tm.assert_produces_warning(warning, match="Could not infer format"): - to_datetime("2417-10-10 00:00:00", format=format) + to_datetime("2417-10-10 00:00:00", format=format) @pytest.mark.parametrize( "arg, origin, expected_str",
- [ ] closes #52167 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Not sure this needs a whatsnew note as it could just be considered part of the PDEP4 changes, which haven't yet reached user-facing status The idea here is to only warn if there are at least 2 non-null elements - otherwise, a single element can't be inconsistently-parsed :)
https://api.github.com/repos/pandas-dev/pandas/pulls/52195
2023-03-25T08:17:59Z
2023-03-27T17:23:45Z
2023-03-27T17:23:45Z
2023-03-27T17:46:40Z
BUG: Unexpected KeyError message when using .loc with MultiIndex in a possible edge-case
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index abe4a00e0b813..0f010da02472e 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2841,6 +2841,8 @@ def _maybe_to_slice(loc): # i.e. do we need _index_as_unique on that level? try: return self._engine.get_loc(key) + except KeyError as err: + raise KeyError(key) from err except TypeError: # e.g. test_partial_slicing_with_multiindex partial string slicing loc, _ = self.get_loc_level(key, list(range(self.nlevels))) diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py index 4bfba07332313..f069cdbedabf0 100644 --- a/pandas/tests/indexes/multi/test_drop.py +++ b/pandas/tests/indexes/multi/test_drop.py @@ -32,16 +32,16 @@ def test_drop(idx): tm.assert_index_equal(dropped, expected) index = MultiIndex.from_tuples([("bar", "two")]) - with pytest.raises(KeyError, match=r"^15$"): + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): idx.drop([("bar", "two")]) - with pytest.raises(KeyError, match=r"^15$"): + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): idx.drop(index) with pytest.raises(KeyError, match=r"^'two'$"): idx.drop(["foo", "two"]) # partially correct argument mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")]) - with pytest.raises(KeyError, match=r"^15$"): + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): idx.drop(mixed_index) # error='ignore' diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 31c5ab333ecfa..2b75efd130aa2 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -565,7 +565,7 @@ class TestGetLoc: def test_get_loc(self, idx): assert idx.get_loc(("foo", "two")) == 1 assert idx.get_loc(("baz", "two")) == 3 - with pytest.raises(KeyError, match=r"^15$"): + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): idx.get_loc(("bar", "two")) with 
pytest.raises(KeyError, match=r"^'quux'$"): idx.get_loc("quux") diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index d95b27574cd82..3bf8c2eaa7e94 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -420,6 +420,19 @@ def test_loc_no_second_level_index(self): ) tm.assert_frame_equal(res, expected) + def test_loc_multi_index_key_error(self): + # GH 51892 + df = DataFrame( + { + (1, 2): ["a", "b", "c"], + (1, 3): ["d", "e", "f"], + (2, 2): ["g", "h", "i"], + (2, 4): ["j", "k", "l"], + } + ) + with pytest.raises(KeyError, match=r"(1, 4)"): + df.loc[0, (1, 4)] + @pytest.mark.parametrize( "indexer, pos",
- [x] closes #51892 - [ ] [Tests added and passed] - [x] All [code checks passed] - [ ] Added [type annotations] - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52194
2023-03-25T07:04:21Z
2023-03-27T04:16:12Z
2023-03-27T04:16:12Z
2023-03-27T04:16:23Z
Backport PR #52174 on branch 2.0.x (BUG: to_numeric converting StringArray to object or float64)
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 47f477a9a4e92..37eede59e257d 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2321,10 +2321,14 @@ def maybe_convert_numeric( if not seen.coerce_numeric: raise type(err)(f"{err} at position {i}") - seen.saw_null() - floats[i] = NaN mask[i] = 1 + if allow_null_in_int: + seen.null_ = True + else: + seen.saw_null() + floats[i] = NaN + if seen.check_uint64_conflict(): return (values, None) diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index c4a03ed8b79b7..514909caab4fc 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -21,8 +21,8 @@ is_integer_dtype, is_number, is_numeric_dtype, - is_object_dtype, is_scalar, + is_string_dtype, needs_i8_conversion, ) from pandas.core.dtypes.generic import ( @@ -32,6 +32,7 @@ import pandas as pd from pandas.core.arrays import BaseMaskedArray +from pandas.core.arrays.string_ import StringDtype def to_numeric( @@ -191,6 +192,8 @@ def to_numeric( else: values = arg + orig_values = values + # GH33013: for IntegerArray & FloatingArray extract non-null values for casting # save mask to reconstruct the full array after casting mask: npt.NDArray[np.bool_] | None = None @@ -215,17 +218,23 @@ def to_numeric( values, set(), coerce_numeric=coerce_numeric, - convert_to_masked_nullable=dtype_backend is not lib.no_default, + convert_to_masked_nullable=dtype_backend is not lib.no_default + or isinstance(values_dtype, StringDtype), ) except (ValueError, TypeError): if errors == "raise": raise + values = orig_values if new_mask is not None: # Remove unnecessary values, is expected later anyway and enables # downcasting values = values[~new_mask] - elif dtype_backend is not lib.no_default and new_mask is None: + elif ( + dtype_backend is not lib.no_default + and new_mask is None + or isinstance(values_dtype, StringDtype) + ): new_mask = np.zeros(values.shape, dtype=np.bool_) # attempt downcast only if the data has been 
successfully converted @@ -260,7 +269,7 @@ def to_numeric( # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct # masked array - if (mask is not None or new_mask is not None) and not is_object_dtype(values.dtype): + if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype): if mask is None: mask = new_mask else: diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index 4a0b01a275523..fe6794b120681 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ b/pandas/tests/tools/test_to_numeric.py @@ -723,12 +723,12 @@ def test_precision_float_conversion(strrep): @pytest.mark.parametrize( "values, expected", [ - (["1", "2", None], Series([1, 2, np.nan])), - (["1", "2", "3"], Series([1, 2, 3])), - (["1", "2", 3], Series([1, 2, 3])), - (["1", "2", 3.5], Series([1, 2, 3.5])), - (["1", None, 3.5], Series([1, np.nan, 3.5])), - (["1", "2", "3.5"], Series([1, 2, 3.5])), + (["1", "2", None], Series([1, 2, np.nan], dtype="Int64")), + (["1", "2", "3"], Series([1, 2, 3], dtype="Int64")), + (["1", "2", 3], Series([1, 2, 3], dtype="Int64")), + (["1", "2", 3.5], Series([1, 2, 3.5], dtype="Float64")), + (["1", None, 3.5], Series([1, np.nan, 3.5], dtype="Float64")), + (["1", "2", "3.5"], Series([1, 2, 3.5], dtype="Float64")), ], ) def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected): @@ -738,6 +738,24 @@ def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected tm.assert_series_equal(result, expected) +def test_to_numeric_from_nullable_string_coerce(nullable_string_dtype): + # GH#52146 + values = ["a", "1"] + ser = Series(values, dtype=nullable_string_dtype) + result = to_numeric(ser, errors="coerce") + expected = Series([pd.NA, 1], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_to_numeric_from_nullable_string_ignore(nullable_string_dtype): + # GH#52146 + values = ["a", "1"] + ser = Series(values, 
dtype=nullable_string_dtype) + expected = ser.copy() + result = to_numeric(ser, errors="ignore") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( "data, input_dtype, downcast, expected_dtype", (
#52174
https://api.github.com/repos/pandas-dev/pandas/pulls/52193
2023-03-25T05:21:07Z
2023-03-25T15:04:23Z
2023-03-25T15:04:23Z
2023-04-22T17:25:31Z
Backport PR #52184 on branch 2.0.x (DOC: Clarify difference between StringDtype(pyarrow) and ArrowDtype(string))
diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index edcd3d2a40b1a..54e49448daca8 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -93,9 +93,10 @@ PyArrow type pandas extension type NumPy .. note:: - For string types (``pyarrow.string()``, ``string[pyarrow]``), PyArrow support is still facilitated - by :class:`arrays.ArrowStringArray` and ``StringDtype("pyarrow")``. See the :ref:`string section <api.arrays.string>` - below. + Pyarrow-backed string support is provided by both ``pd.StringDtype("pyarrow")`` and ``pd.ArrowDtype(pa.string())``. + ``pd.StringDtype("pyarrow")`` is described below in the :ref:`string section <api.arrays.string>` + and will be returned if the string alias ``"string[pyarrow]"`` is specified. ``pd.ArrowDtype(pa.string())`` + generally has better interoperability with :class:`ArrowDtype` of different types. While individual values in an :class:`arrays.ArrowExtensionArray` are stored as a PyArrow objects, scalars are **returned** as Python scalars corresponding to the data type, e.g. a PyArrow int64 will be returned as Python int, or :class:`NA` for missing diff --git a/doc/source/user_guide/pyarrow.rst b/doc/source/user_guide/pyarrow.rst index 8531216ecc61e..63937ed27b8b2 100644 --- a/doc/source/user_guide/pyarrow.rst +++ b/doc/source/user_guide/pyarrow.rst @@ -35,6 +35,23 @@ which is similar to a NumPy array. To construct these from the main pandas data df = pd.DataFrame([[1, 2], [3, 4]], dtype="uint64[pyarrow]") df +.. note:: + + The string alias ``"string[pyarrow]"`` maps to ``pd.StringDtype("pyarrow")`` which is not equivalent to + specifying ``dtype=pd.ArrowDtype(pa.string())``. Generally, operations on the data will behave similarly + except ``pd.StringDtype("pyarrow")`` can return NumPy-backed nullable types while ``pd.ArrowDtype(pa.string())`` + will return :class:`ArrowDtype`. + + .. 
ipython:: python + + import pyarrow as pa + data = list("abc") + ser_sd = pd.Series(data, dtype="string[pyarrow]") + ser_ad = pd.Series(data, dtype=pd.ArrowDtype(pa.string())) + ser_ad.dtype == ser_sd.dtype + ser_sd.str.contains("a") + ser_ad.str.contains("a") + For PyArrow types that accept parameters, you can pass in a PyArrow type with those parameters into :class:`ArrowDtype` to use in the ``dtype`` parameter. @@ -106,6 +123,7 @@ The following are just some examples of operations that are accelerated by nativ .. ipython:: python + import pyarrow as pa ser = pd.Series([-1.545, 0.211, None], dtype="float32[pyarrow]") ser.mean() ser + ser @@ -115,7 +133,7 @@ The following are just some examples of operations that are accelerated by nativ ser.isna() ser.fillna(0) - ser_str = pd.Series(["a", "b", None], dtype="string[pyarrow]") + ser_str = pd.Series(["a", "b", None], dtype=pd.ArrowDtype(pa.string())) ser_str.str.startswith("a") from datetime import datetime
Backport PR #52184: DOC: Clarify difference between StringDtype(pyarrow) and ArrowDtype(string)
https://api.github.com/repos/pandas-dev/pandas/pulls/52192
2023-03-25T05:09:38Z
2023-03-25T15:04:32Z
2023-03-25T15:04:32Z
2023-03-25T15:04:32Z
BUG: construct Timestamp with year out of pydatetime range
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 9e4bba1cf3544..4dcb3db0c2d72 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1626,6 +1626,30 @@ class Timestamp(_Timestamp): # When year, month or day is not given, we call the datetime # constructor to make sure we get the same error message # since Timestamp inherits datetime + + if year is not None and not (0 < year < 10_000): + # GH#52091 + obj = cls( + year=1970, + month=month, + day=day, + hour=hour, + minute=minute, + second=second, + microsecond=microsecond, + nanosecond=nanosecond, + fold=fold, + tz=tz, + tzinfo=tzinfo, + unit=unit, + ) + if nanosecond is not None and nanosecond != 0: + raise OutOfBoundsDatetime( + f"Cannot construct a Timestamp with year={year} and " + "non-zero nanoseconds" + ) + return obj.as_unit("us").replace(year=year) + datetime_kwargs = { "hour": hour or 0, "minute": minute or 0, @@ -1646,6 +1670,29 @@ class Timestamp(_Timestamp): # User passed positional arguments: # Timestamp(year, month, day[, hour[, minute[, second[, # microsecond[, tzinfo]]]]]) + if not (0 < ts_input < 10_000): + # GH#52091 + obj = cls( + 1970, + year, + month, + day, + hour, + minute, + second, + microsecond, + nanosecond=nanosecond, + fold=fold, + tz=tz, + tzinfo=tzinfo, + unit=unit, + ) + if nanosecond is not None and nanosecond != 0: + raise OutOfBoundsDatetime( + f"Cannot construct a Timestamp with year={ts_input} and " + "non-zero nanoseconds" + ) + return obj.as_unit("us").replace(year=ts_input) ts_input = datetime(ts_input, year, month, day or 0, hour or 0, minute or 0, second or 0, fold=fold or 0) unit = None diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index 5fca577ff28d1..d9f73b632d5b9 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -25,6 +25,26 @@ class 
TestTimestampConstructors: + def test_construct_year_out_of_pydatetime_bounds(self): + # GH#52091 pass a year outside of pydatetime bounds either as positional + # or keyword argument + ts = Timestamp(year=21000, month=1, day=2) + assert ts.year == 21000 + assert ts.month == 1 + assert ts.day == 2 + assert ts.unit == "us" + + ts2 = Timestamp(21000, 1, 2) + assert ts2 == ts + assert ts2.unit == "us" + + msg = "Cannot construct a Timestamp with year=21000 and non-zero nanoseconds" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(year=21000, month=1, day=2, nanosecond=1) + + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(21000, 1, 2, nanosecond=1) + def test_weekday_but_no_day_raises(self): # GH#52659 msg = "Parsing datetimes with weekday but no day information is not supported"
- [x] closes #52091 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52191
2023-03-24T23:39:01Z
2024-01-31T18:51:32Z
null
2024-01-31T18:51:33Z
REF/TYP: stricter typing for Series._slice
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bef7022a7d10f..f1a1f842d2107 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3758,18 +3758,10 @@ def __getitem__(self, key): elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) + # Do we have a slicer (on rows)? if isinstance(key, slice): - indexer = self.index._convert_slice_indexer(key, kind="getitem") - if isinstance(indexer, np.ndarray): - # reachable with DatetimeIndex - indexer = lib.maybe_indices_to_slice( - indexer.astype(np.intp, copy=False), len(self) - ) - if isinstance(indexer, np.ndarray): - # GH#43223 If we can not convert, use take - return self.take(indexer, axis=0) - return self._slice(indexer, axis=0) + return self._getitem_slice(key) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2b650d99c7e6c..141fababb15be 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4141,7 +4141,26 @@ class animal locomotion def __getitem__(self, item): raise AbstractMethodError(self) - def _slice(self, slobj: slice, axis: Axis = 0) -> Self: + @final + def _getitem_slice(self, key: slice) -> Self: + """ + __getitem__ for the case where the key is a slice object. + """ + # _convert_slice_indexer to determine if this slice is positional + # or label based, and if the latter, convert to positional + slobj = self.index._convert_slice_indexer(key, kind="getitem") + if isinstance(slobj, np.ndarray): + # reachable with DatetimeIndex + indexer = lib.maybe_indices_to_slice( + slobj.astype(np.intp, copy=False), len(self) + ) + if isinstance(indexer, np.ndarray): + # GH#43223 If we can not convert, use take + return self.take(indexer, axis=0) + slobj = indexer + return self._slice(slobj) + + def _slice(self, slobj: slice, axis: AxisInt = 0) -> Self: """ Construct a slice of this container. 
diff --git a/pandas/core/series.py b/pandas/core/series.py index 06c744c3e36fa..97d84ffdbab4b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -943,10 +943,12 @@ def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ return self._values[i] - def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: + def _slice(self, slobj: slice, axis: AxisInt = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional - return self._get_values(slobj) + mgr = self._mgr.get_slice(slobj, axis=axis) + out = self._constructor(mgr, fastpath=True) + return out.__finalize__(self) def __getitem__(self, key): check_dict_or_set_indexers(key) @@ -983,10 +985,7 @@ def __getitem__(self, key): if isinstance(key, slice): # Do slice check before somewhat-costly is_bool_indexer - # _convert_slice_indexer to determine if this slice is positional - # or label based, and if the latter, convert to positional - slobj = self.index._convert_slice_indexer(key, kind="getitem") - return self._slice(slobj) + return self._getitem_slice(key) if is_iterator(key): key = list(key)
Motivation is that Series._slice surprisingly accepts non-slice inputs. Changing that required copying the slice-handling code from `DataFrame.__getitem__`, at which point it made sense to share that by implementing `NDFrame._getitem_slice`
https://api.github.com/repos/pandas-dev/pandas/pulls/52190
2023-03-24T23:04:12Z
2023-03-27T06:18:35Z
2023-03-27T06:18:35Z
2023-03-27T14:49:13Z
BUG: fix dic_values update
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 6d5daf5025c49..9720f3799e864 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,5 +1,6 @@ from __future__ import annotations +import collections from contextlib import suppress import sys from typing import ( @@ -1864,6 +1865,11 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str): if len(value) == 1 and not is_integer(info_axis): # This is a case like df.iloc[:3, [1]] = [0] # where we treat as df.iloc[:3, 1] = 0 + + # to avoid "TypeError: 'dict_values' object is not subscriptable" + if isinstance(value, collections.abc.ValuesView): + value = list(value) + return self._setitem_with_indexer((pi, info_axis[0]), value[0]) raise ValueError( diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 9b01c6b45918c..6e4f12fee1e72 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -3197,6 +3197,68 @@ def test_loc_setitem_dict_timedelta_multiple_set(self): ) tm.assert_frame_equal(result, expected) + def test_loc_setitems_dict(self): + # GH 52175 + + # single row, single column + df1 = DataFrame(index=[1, 2], columns=["a"]) + d = {"b": 1.1} + df1.loc[1, d.keys()] = d.values() + + # single row, multiple columns + df2 = DataFrame(index=[1, 2], columns=["a"]) + d = {"b": 1.1, "c": 2.2} + df2.loc[1, d.keys()] = d.values() + + # multiple rows, single column + df3 = DataFrame(index=[1, 2], columns=["a"]) + d = {"b": 1.1} + df3.loc[[1, 2], d.keys()] = d.values() + + # multiple rows, multiple columns + df4 = DataFrame(index=[1, 2], columns=["a"]) + d = {"b": 1.1, "c": 2.2} + df4.loc[[1, 2], d.keys()] = d.values() + + expected1 = DataFrame( + { + "a": Series([np.nan, np.nan], dtype="object"), + "b": [1.1, np.nan], + }, + index=[1, 2], + ) + + expected2 = DataFrame( + { + "a": Series([np.nan, np.nan], dtype="object"), + "b": [1.1, np.nan], + "c": [2.2, np.nan], + }, + index=[1, 2], + ) + + expected3 = 
DataFrame( + { + "a": Series([np.nan, np.nan], dtype="object"), + "b": [1.1, 1.1], + }, + index=[1, 2], + ) + + expected4 = DataFrame( + { + "a": Series([np.nan, np.nan], dtype="object"), + "b": [1.1, 1.1], + "c": [2.2, 2.2], + }, + index=[1, 2], + ) + + tm.assert_frame_equal(df1, expected1) + tm.assert_frame_equal(df2, expected2) + tm.assert_frame_equal(df3, expected3) + tm.assert_frame_equal(df4, expected4) + def test_loc_set_multiple_items_in_multiple_new_columns(self): # GH 25594 df = DataFrame(index=[1, 2], columns=["a"])
- [x] closes #52175 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. -> I'm not sure where I should put an entry: 1.x.x vs 2.x.x
https://api.github.com/repos/pandas-dev/pandas/pulls/52188
2023-03-24T22:08:43Z
2023-04-21T17:32:55Z
null
2023-04-21T17:32:55Z
DEPR: subclassing Index
diff --git a/doc/source/development/internals.rst b/doc/source/development/internals.rst index 3dd687ef2087d..e3468746ce177 100644 --- a/doc/source/development/internals.rst +++ b/doc/source/development/internals.rst @@ -31,31 +31,9 @@ There are functions that make the creation of a regular index easy: * :func:`period_range`: fixed frequency date range generated from a time rule or DateOffset. An ndarray of :class:`Period` objects, representing timespans -The motivation for having an ``Index`` class in the first place was to enable -different implementations of indexing. This means that it's possible for you, -the user, to implement a custom ``Index`` subclass that may be better suited to -a particular application than the ones provided in pandas. - -From an internal implementation point of view, the relevant methods that an -``Index`` must define are one or more of the following (depending on how -incompatible the new object internals are with the ``Index`` functions): - -* :meth:`~Index.get_loc`: returns an "indexer" (an integer, or in some cases a - slice object) for a label -* :meth:`~Index.slice_locs`: returns the "range" to slice between two labels -* :meth:`~Index.get_indexer`: Computes the indexing vector for reindexing / data - alignment purposes. See the source / docstrings for more on this -* :meth:`~Index.get_indexer_non_unique`: Computes the indexing vector for reindexing / data - alignment purposes when the index is non-unique. See the source / docstrings - for more on this -* :meth:`~Index.reindex`: Does any pre-conversion of the input index then calls - ``get_indexer`` -* :meth:`~Index.union`, :meth:`~Index.intersection`: computes the union or intersection of two - Index objects -* :meth:`~Index.insert`: Inserts a new label into an Index, yielding a new object -* :meth:`~Index.delete`: Delete a label, yielding a new object -* :meth:`~Index.drop`: Deletes a set of labels -* :meth:`~Index.take`: Analogous to ndarray.take +.. 
warning:: + + Custom :class:`Index` subclasses are not supported, custom behavior should be implemented using the :class:`ExtensionArray` interface instead. MultiIndex ~~~~~~~~~~ diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index bac567b537edc..b745d640b47c3 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -104,6 +104,7 @@ Deprecations - Deprecated :meth:`.Groupby.all` and :meth:`.GroupBy.any` with datetime64 or :class:`PeriodDtype` values, matching the :class:`Series` and :class:`DataFrame` deprecations (:issue:`34479`) - Deprecating pinning ``group.name`` to each group in :meth:`SeriesGroupBy.aggregate` aggregations; if your operation requires utilizing the groupby keys, iterate over the groupby object instead (:issue:`41090`) - Deprecated the default of ``observed=False`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby`; this will default to ``True`` in a future version (:issue:`43999`) +- Deprecated explicit support for subclassing :class:`Index` (:issue:`45289`) - Deprecated :meth:`DataFrameGroupBy.dtypes`, check ``dtypes`` on the underlying object instead (:issue:`51045`) - Deprecated ``axis=1`` in :meth:`DataFrame.groupby` and in :class:`Grouper` constructor, do ``frame.T.groupby(...)`` instead (:issue:`51203`) - Deprecated :meth:`Categorical.to_list`, use ``obj.tolist()`` instead (:issue:`51254`)
- [x] closes #45289 (Replace xxxx with the GitHub issue number) - [x] closes #15258 - [x] closes #37882 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This is just a docs change, removing the wording that suggests we explicitly support subclassing Index. So it shouldn't break any implementations in the wild.
https://api.github.com/repos/pandas-dev/pandas/pulls/52186
2023-03-24T22:04:39Z
2023-03-29T21:45:28Z
2023-03-29T21:45:28Z
2023-03-29T21:46:15Z
Backport PR #52180 on branch 2.0.x (BUG: to_sql raises when arrow dtype has missing values)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 390eb33d6eefe..81d8183a79bc1 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -2077,7 +2077,7 @@ def _dt_round( def _dt_to_pydatetime(self): data = self._data.to_pylist() if self._dtype.pyarrow_dtype.unit == "ns": - data = [ts.to_pydatetime(warn=False) for ts in data] + data = [None if ts is None else ts.to_pydatetime(warn=False) for ts in data] return np.array(data, dtype=object) def _dt_tz_localize( diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 40cd011a1dd62..2504794384038 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -570,6 +570,22 @@ def test_dataframe_to_sql_arrow_dtypes(conn, request): df.to_sql("test_arrow", conn, if_exists="replace", index=False) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture): + # GH 52046 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "datetime": pd.array( + [datetime(2023, 1, 1), nulls_fixture], dtype="timestamp[ns][pyarrow]" + ), + } + ) + conn = request.getfixturevalue(conn) + df.to_sql("test_arrow", conn, if_exists="replace", index=False) + + @pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) @pytest.mark.parametrize("method", [None, "multi"])
Backport PR #52180: BUG: to_sql raises when arrow dtype has missing values
https://api.github.com/repos/pandas-dev/pandas/pulls/52185
2023-03-24T21:55:49Z
2023-03-24T23:53:04Z
2023-03-24T23:53:03Z
2023-03-24T23:53:04Z
DOC: Clarify difference between StringDtype(pyarrow) and ArrowDtype(string)
diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index edcd3d2a40b1a..54e49448daca8 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -93,9 +93,10 @@ PyArrow type pandas extension type NumPy .. note:: - For string types (``pyarrow.string()``, ``string[pyarrow]``), PyArrow support is still facilitated - by :class:`arrays.ArrowStringArray` and ``StringDtype("pyarrow")``. See the :ref:`string section <api.arrays.string>` - below. + Pyarrow-backed string support is provided by both ``pd.StringDtype("pyarrow")`` and ``pd.ArrowDtype(pa.string())``. + ``pd.StringDtype("pyarrow")`` is described below in the :ref:`string section <api.arrays.string>` + and will be returned if the string alias ``"string[pyarrow]"`` is specified. ``pd.ArrowDtype(pa.string())`` + generally has better interoperability with :class:`ArrowDtype` of different types. While individual values in an :class:`arrays.ArrowExtensionArray` are stored as a PyArrow objects, scalars are **returned** as Python scalars corresponding to the data type, e.g. a PyArrow int64 will be returned as Python int, or :class:`NA` for missing diff --git a/doc/source/user_guide/pyarrow.rst b/doc/source/user_guide/pyarrow.rst index 8531216ecc61e..63937ed27b8b2 100644 --- a/doc/source/user_guide/pyarrow.rst +++ b/doc/source/user_guide/pyarrow.rst @@ -35,6 +35,23 @@ which is similar to a NumPy array. To construct these from the main pandas data df = pd.DataFrame([[1, 2], [3, 4]], dtype="uint64[pyarrow]") df +.. note:: + + The string alias ``"string[pyarrow]"`` maps to ``pd.StringDtype("pyarrow")`` which is not equivalent to + specifying ``dtype=pd.ArrowDtype(pa.string())``. Generally, operations on the data will behave similarly + except ``pd.StringDtype("pyarrow")`` can return NumPy-backed nullable types while ``pd.ArrowDtype(pa.string())`` + will return :class:`ArrowDtype`. + + .. 
ipython:: python + + import pyarrow as pa + data = list("abc") + ser_sd = pd.Series(data, dtype="string[pyarrow]") + ser_ad = pd.Series(data, dtype=pd.ArrowDtype(pa.string())) + ser_ad.dtype == ser_sd.dtype + ser_sd.str.contains("a") + ser_ad.str.contains("a") + For PyArrow types that accept parameters, you can pass in a PyArrow type with those parameters into :class:`ArrowDtype` to use in the ``dtype`` parameter. @@ -106,6 +123,7 @@ The following are just some examples of operations that are accelerated by nativ .. ipython:: python + import pyarrow as pa ser = pd.Series([-1.545, 0.211, None], dtype="float32[pyarrow]") ser.mean() ser + ser @@ -115,7 +133,7 @@ The following are just some examples of operations that are accelerated by nativ ser.isna() ser.fillna(0) - ser_str = pd.Series(["a", "b", None], dtype="string[pyarrow]") + ser_str = pd.Series(["a", "b", None], dtype=pd.ArrowDtype(pa.string())) ser_str.str.startswith("a") from datetime import datetime
Spawned from a discussion in #52156
https://api.github.com/repos/pandas-dev/pandas/pulls/52184
2023-03-24T21:23:53Z
2023-03-25T05:09:03Z
2023-03-25T05:09:03Z
2023-03-25T20:42:59Z
PERF: slicing
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 533727f8f2d42..88c95331cd393 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -831,7 +831,7 @@ cdef class BlockManager: # ------------------------------------------------------------------- # Indexing - cdef BlockManager _get_index_slice(self, slobj): + cdef BlockManager _get_index_slice(self, slice slobj): cdef: SharedBlock blk, nb BlockManager mgr diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 31d4274bb5f8d..2e425f5797c62 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -47,6 +47,7 @@ def is_decimal(val: object) -> TypeGuard[Decimal]: ... def is_complex(val: object) -> TypeGuard[complex]: ... def is_bool(val: object) -> TypeGuard[bool | np.bool_]: ... def is_integer(val: object) -> TypeGuard[int | np.integer]: ... +def is_int_or_none(obj) -> bool: ... def is_float(val: object) -> TypeGuard[float]: ... def is_interval_array(values: np.ndarray) -> bool: ... def is_datetime64_array(values: np.ndarray) -> bool: ... diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index c6aded1b25281..573f5aca6aff6 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1057,6 +1057,17 @@ def is_integer(obj: object) -> bool: return util.is_integer_object(obj) +def is_int_or_none(obj) -> bool: + """ + Return True if given object is integer or None. + + Returns + ------- + bool + """ + return obj is None or util.is_integer_object(obj) + + def is_bool(obj: object) -> bool: """ Return True if given object is boolean. 
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py index ffd33a39b8d2b..55bb58f3108c3 100644 --- a/pandas/core/indexers/utils.py +++ b/pandas/core/indexers/utils.py @@ -10,6 +10,8 @@ import numpy as np +from pandas._libs import lib + from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, @@ -50,14 +52,10 @@ def is_valid_positional_slice(slc: slice) -> bool: A valid positional slice may also be interpreted as a label-based slice depending on the index being sliced. """ - - def is_int_or_none(val): - return val is None or is_integer(val) - return ( - is_int_or_none(slc.start) - and is_int_or_none(slc.stop) - and is_int_or_none(slc.step) + lib.is_int_or_none(slc.start) + and lib.is_int_or_none(slc.stop) + and lib.is_int_or_none(slc.step) ) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index eb79278eb35d9..e615d9055efc4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -99,7 +99,6 @@ is_float_dtype, is_hashable, is_integer, - is_integer_dtype, is_iterator, is_list_like, is_numeric_dtype, @@ -161,7 +160,10 @@ extract_array, sanitize_array, ) -from pandas.core.indexers import disallow_ndim_indexing +from pandas.core.indexers import ( + disallow_ndim_indexing, + is_valid_positional_slice, +) from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name @@ -4071,7 +4073,7 @@ def _validate_positional_slice(self, key: slice) -> None: self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") - def _convert_slice_indexer(self, key: slice, kind: str_t): + def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]): """ Convert a slice indexer. 
@@ -4083,7 +4085,6 @@ def _convert_slice_indexer(self, key: slice, kind: str_t): key : label of the slice bound kind : {'loc', 'getitem'} """ - assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step @@ -4096,22 +4097,14 @@ def _convert_slice_indexer(self, key: slice, kind: str_t): return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer - def is_int(v): - return v is None or is_integer(v) - - is_index_slice = is_int(start) and is_int(stop) and is_int(step) - - # special case for interval_dtype bc we do not do partial-indexing - # on integer Intervals when slicing - # TODO: write this in terms of e.g. should_partial_index? - ints_are_positional = self._should_fallback_to_positional or isinstance( - self.dtype, IntervalDtype - ) - is_positional = is_index_slice and ints_are_positional + is_index_slice = is_valid_positional_slice(key) if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers - if is_index_slice or is_integer_dtype(self.dtype): + if is_index_slice: + # In this case the _validate_indexer checks below are redundant + return key + elif self.dtype.kind in "iu": # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") @@ -4120,6 +4113,14 @@ def is_int(v): # convert the slice to an indexer here + # special case for interval_dtype bc we do not do partial-indexing + # on integer Intervals when slicing + # TODO: write this in terms of e.g. should_partial_index? 
+ ints_are_positional = self._should_fallback_to_positional or isinstance( + self.dtype, IntervalDtype + ) + is_positional = is_index_slice and ints_are_positional + # if we are mixed and have integers if is_positional: try: @@ -4151,7 +4152,7 @@ def is_int(v): @final def _raise_invalid_indexer( self, - form: str_t, + form: Literal["slice", "positional"], key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: @@ -6384,14 +6385,17 @@ def _maybe_cast_listlike_indexer(self, target) -> Index: return ensure_index(target) @final - def _validate_indexer(self, form: str_t, key, kind: str_t) -> None: + def _validate_indexer( + self, + form: Literal["positional", "slice"], + key, + kind: Literal["getitem", "iloc"], + ) -> None: """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ - assert kind in ["getitem", "iloc"] - - if key is not None and not is_integer(key): + if not lib.is_int_or_none(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 1740c5c368a94..ede3b8f0c0e95 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -784,7 +784,7 @@ def _index_as_unique(self) -> bool: "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique" ) - def _convert_slice_indexer(self, key: slice, kind: str): + def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]): if not (key.step is None or key.step == 1): # GH#31658 if label-based, we require step == 1, # if positional, we disallow float start/stop diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 66c5a12549f23..8ed9543cc00dd 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -54,6 +54,7 @@ npt, ) _empty_range = range(0) +_dtype_int64 = np.dtype(np.int64) class RangeIndex(Index): @@ -309,7 +310,7 @@ def 
memory_usage(self, deep: bool = False) -> int: @property def dtype(self) -> np.dtype: - return np.dtype(np.int64) + return _dtype_int64 @property def is_unique(self) -> bool:
``` import pandas as pd ser = pd.Series(range(300_000)) df = ser.to_frame() %timeit ser[:30] 14.6 µs ± 474 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each) # <- main 12.6 µs ± 421 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each) # <- PR %timeit df[:30] 16.2 µs ± 1.23 µs per loop (mean ± std. dev. of 7 runs, 100,000 loops each) # <- main 11.9 µs ± 184 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each) # <- PR ```
https://api.github.com/repos/pandas-dev/pandas/pulls/52183
2023-03-24T20:40:28Z
2023-03-29T19:55:21Z
2023-03-29T19:55:21Z
2023-03-31T14:49:23Z
Backport PR #52171 on branch 2.0.x (DOC: update SemiMonthEnd examples to not use (n=0))
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index f2869b1779b52..fd3d80a8a3fa6 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -2788,10 +2788,10 @@ cdef class SemiMonthEnd(SemiMonthOffset): >>> ts + pd.offsets.SemiMonthEnd() Timestamp('2022-02-15 00:00:00') - If you want to get the result for the current month pass the parameter n equals 0: + If you want to get the result for the current month: >>> ts = pd.Timestamp(2022, 1, 15) - >>> ts + pd.offsets.SemiMonthEnd(0) + >>> pd.offsets.SemiMonthEnd().rollforward(ts) Timestamp('2022-01-15 00:00:00') """ _prefix = "SM"
Backport PR #52171: DOC: update SemiMonthEnd examples to not use (n=0)
https://api.github.com/repos/pandas-dev/pandas/pulls/52182
2023-03-24T20:36:36Z
2023-03-25T05:10:33Z
2023-03-25T05:10:33Z
2023-03-25T05:10:34Z
DOC: series.py quantile explanation example
diff --git a/pandas/core/series.py b/pandas/core/series.py index 06c744c3e36fa..fe47d82b95740 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2573,6 +2573,9 @@ def quantile( * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. + For example, if (len(self) - 1) * q is 9.6, + and the elements at indices 9 and 10 are 3 and 4, + return 0.4 * 3 + 0.6 * 4 = 3.6 * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest.
- [ ] closes #51745
https://api.github.com/repos/pandas-dev/pandas/pulls/52181
2023-03-24T20:10:20Z
2023-04-02T14:12:19Z
null
2023-04-02T14:12:19Z
BUG: to_sql raises when arrow dtype has missing values
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 353da80e27464..6b722d800519c 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -2093,7 +2093,7 @@ def _dt_round( def _dt_to_pydatetime(self): data = self._pa_array.to_pylist() if self._dtype.pyarrow_dtype.unit == "ns": - data = [ts.to_pydatetime(warn=False) for ts in data] + data = [None if ts is None else ts.to_pydatetime(warn=False) for ts in data] return np.array(data, dtype=object) def _dt_tz_localize( diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 1bfc5cf0c3178..ab88e4ccd8b82 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -570,6 +570,22 @@ def test_dataframe_to_sql_arrow_dtypes(conn, request): df.to_sql("test_arrow", conn, if_exists="replace", index=False) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture): + # GH 52046 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "datetime": pd.array( + [datetime(2023, 1, 1), nulls_fixture], dtype="timestamp[ns][pyarrow]" + ), + } + ) + conn = request.getfixturevalue(conn) + df.to_sql("test_arrow", conn, if_exists="replace", index=False) + + @pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) @pytest.mark.parametrize("method", [None, "multi"])
- [x] closes #52178 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52180
2023-03-24T19:57:48Z
2023-03-24T21:55:14Z
2023-03-24T21:55:14Z
2023-03-24T23:49:33Z
BUG: set_levels not preserving categorical
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 64c7503849de2..1f5c3c88c5ff5 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -200,7 +200,7 @@ Missing MultiIndex ^^^^^^^^^^ -- +- Bug in :meth:`MultiIndex.set_levels` not preserving dtypes for :class:`Categorical` (:issue:`52125`) - I/O diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 580a1901fc2da..abe4a00e0b813 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -73,6 +73,7 @@ ABCDatetimeIndex, ABCTimedeltaIndex, ) +from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import ( array_equivalent, isna, @@ -945,7 +946,11 @@ def set_levels( FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]]) """ - if is_list_like(levels) and not isinstance(levels, Index): + if isinstance(levels, Index): + pass + elif is_array_like(levels): + levels = Index(levels) + elif is_list_like(levels): levels = list(levels) level, levels = _require_listlike(level, levels, "Levels") diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index 70350f0df821b..8f5bba7debf2a 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ b/pandas/tests/indexes/multi/test_get_set.py @@ -367,3 +367,11 @@ def test_set_levels_pos_args_removal(): with pytest.raises(TypeError, match="positional arguments"): idx.set_codes([[0, 1], [1, 0]], 0) + + +def test_set_levels_categorical_keep_dtype(): + # GH#52125 + midx = MultiIndex.from_arrays([[5, 6]]) + result = midx.set_levels(levels=pd.Categorical([1, 2]), level=0) + expected = MultiIndex.from_arrays([pd.Categorical([1, 2])]) + tm.assert_index_equal(result, expected)
- [x] closes #52125 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52177
2023-03-24T19:22:34Z
2023-03-24T21:46:45Z
2023-03-24T21:46:45Z
2023-03-24T23:48:49Z
PERF: avoid exceptions in string.Construction benchmark setup
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 59b7cd2accf88..f270f1a83af39 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -34,7 +34,6 @@ def setup(self, dtype): # GH37371. Testing construction of string series/frames from ExtensionArrays self.series_cat_arr = Categorical(self.series_arr) - self.frame_cat_arr = Categorical(self.frame_arr) def time_series_construction(self, dtype): Series(self.series_arr, dtype=dtype) @@ -54,12 +53,6 @@ def time_cat_series_construction(self, dtype): def peakmem_cat_series_construction(self, dtype): Series(self.series_cat_arr, dtype=dtype) - def time_cat_frame_construction(self, dtype): - DataFrame(self.frame_cat_arr, dtype=dtype) - - def peakmem_cat_frame_construction(self, dtype): - DataFrame(self.frame_cat_arr, dtype=dtype) - class Methods(Dtypes): def time_center(self, dtype):
#37371 added some new benchmarks, along with some new setup code for the new benchmarks. Unfortunately, the new setup code introduced an uncaught exception: ``` >>> import pandas._testing as tm >>> from pandas import Categorical >>> series_arr = tm.rands_array(nchars=10, size=10**5) >>> frame_arr = series_arr.reshape((50_000, 2)).copy() >>> frame_cat_arr = Categorical(frame_arr) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/nathan/Documents/pandas/pandas/core/arrays/categorical.py", line 399, in __init__ raise NotImplementedError( NotImplementedError: > 1 ndim Categorical are not supported at this time ``` This caused asv to skip all the string construction benchmarks since that PR was merged because the setup had an uncaught exception. See e.g. [this benchmark report](https://asv-runner.github.io/asv-collection/pandas/#strings.Construction.time_series_construction). The fix is just to remove the broken benchmarks. I built a version of pandas from just after #37371 was merged and verified that the benchmark was broken then. I can't easily verify that the benchmark has been broken the entire time since then but I strongly suspect that's the case.
https://api.github.com/repos/pandas-dev/pandas/pulls/52176
2023-03-24T19:09:11Z
2023-03-24T21:28:09Z
2023-03-24T21:28:09Z
2023-03-24T21:28:10Z
BUG: to_numeric converting StringArray to object or float64
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 88ea61a23a426..c3bb33df34e56 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2325,10 +2325,14 @@ def maybe_convert_numeric( if not seen.coerce_numeric: raise type(err)(f"{err} at position {i}") - seen.saw_null() - floats[i] = NaN mask[i] = 1 + if allow_null_in_int: + seen.null_ = True + else: + seen.saw_null() + floats[i] = NaN + if seen.check_uint64_conflict(): return (values, None) diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index 97900eacd1f5d..04443f89ddf6f 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -19,8 +19,8 @@ is_integer_dtype, is_number, is_numeric_dtype, - is_object_dtype, is_scalar, + is_string_dtype, needs_i8_conversion, ) from pandas.core.dtypes.generic import ( @@ -30,6 +30,7 @@ from pandas.core.arrays import BaseMaskedArray from pandas.core.arrays.arrow import ArrowDtype +from pandas.core.arrays.string_ import StringDtype if TYPE_CHECKING: from pandas._typing import ( @@ -196,6 +197,8 @@ def to_numeric( else: values = arg + orig_values = values + # GH33013: for IntegerArray & FloatingArray extract non-null values for casting # save mask to reconstruct the full array after casting mask: npt.NDArray[np.bool_] | None = None @@ -220,17 +223,23 @@ def to_numeric( values, set(), coerce_numeric=coerce_numeric, - convert_to_masked_nullable=dtype_backend is not lib.no_default, + convert_to_masked_nullable=dtype_backend is not lib.no_default + or isinstance(values_dtype, StringDtype), ) except (ValueError, TypeError): if errors == "raise": raise + values = orig_values if new_mask is not None: # Remove unnecessary values, is expected later anyway and enables # downcasting values = values[~new_mask] - elif dtype_backend is not lib.no_default and new_mask is None: + elif ( + dtype_backend is not lib.no_default + and new_mask is None + or isinstance(values_dtype, StringDtype) + ): new_mask = np.zeros(values.shape, 
dtype=np.bool_) # attempt downcast only if the data has been successfully converted @@ -265,7 +274,7 @@ def to_numeric( # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct # masked array - if (mask is not None or new_mask is not None) and not is_object_dtype(values.dtype): + if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype): if mask is None: mask = new_mask else: diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index 4a0b01a275523..fe6794b120681 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ b/pandas/tests/tools/test_to_numeric.py @@ -723,12 +723,12 @@ def test_precision_float_conversion(strrep): @pytest.mark.parametrize( "values, expected", [ - (["1", "2", None], Series([1, 2, np.nan])), - (["1", "2", "3"], Series([1, 2, 3])), - (["1", "2", 3], Series([1, 2, 3])), - (["1", "2", 3.5], Series([1, 2, 3.5])), - (["1", None, 3.5], Series([1, np.nan, 3.5])), - (["1", "2", "3.5"], Series([1, 2, 3.5])), + (["1", "2", None], Series([1, 2, np.nan], dtype="Int64")), + (["1", "2", "3"], Series([1, 2, 3], dtype="Int64")), + (["1", "2", 3], Series([1, 2, 3], dtype="Int64")), + (["1", "2", 3.5], Series([1, 2, 3.5], dtype="Float64")), + (["1", None, 3.5], Series([1, np.nan, 3.5], dtype="Float64")), + (["1", "2", "3.5"], Series([1, 2, 3.5], dtype="Float64")), ], ) def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected): @@ -738,6 +738,24 @@ def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected tm.assert_series_equal(result, expected) +def test_to_numeric_from_nullable_string_coerce(nullable_string_dtype): + # GH#52146 + values = ["a", "1"] + ser = Series(values, dtype=nullable_string_dtype) + result = to_numeric(ser, errors="coerce") + expected = Series([pd.NA, 1], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_to_numeric_from_nullable_string_ignore(nullable_string_dtype): + # GH#52146 + values = 
["a", "1"] + ser = Series(values, dtype=nullable_string_dtype) + expected = ser.copy() + result = to_numeric(ser, errors="ignore") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( "data, input_dtype, downcast, expected_dtype", (
- [x] closes #52146 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. don't need a whatsnew if we get it in for 2.0
https://api.github.com/repos/pandas-dev/pandas/pulls/52174
2023-03-24T18:58:45Z
2023-03-24T21:28:34Z
2023-03-24T21:28:34Z
2023-03-25T05:21:22Z
CI: Use dependabot to update Github Actions
diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml new file mode 100644 index 0000000000000..784206dfe67ff --- /dev/null +++ b/.github/workflows/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + labels: + - "CI" + - "Dependencies"
Looks like dependabot finally removed enabling by default for forks (https://github.blog/changelog/2022-11-07-dependabot-pull-requests-off-by-default-for-forks/), so it would be nice to use it to keep our Github Actions up to date. Mirrored off of Numpy's configuration but set the frequency to weekly: https://github.com/numpy/numpy/blob/main/.github/dependabot.yml
https://api.github.com/repos/pandas-dev/pandas/pulls/52173
2023-03-24T18:23:17Z
2023-03-25T20:44:33Z
2023-03-25T20:44:33Z
2023-03-25T20:44:37Z
DOC: update SemiMonthEnd examples to not use (n=0)
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index ff068921545c5..9718641e75f60 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -2815,10 +2815,10 @@ cdef class SemiMonthEnd(SemiMonthOffset): >>> ts + pd.offsets.SemiMonthEnd() Timestamp('2022-02-15 00:00:00') - If you want to get the result for the current month pass the parameter n equals 0: + If you want to get the result for the current month: >>> ts = pd.Timestamp(2022, 1, 15) - >>> ts + pd.offsets.SemiMonthEnd(0) + >>> pd.offsets.SemiMonthEnd().rollforward(ts) Timestamp('2022-01-15 00:00:00') """ _prefix = "SM"
closes #52169 this was brought up on the call as something to update by 2.0, as `n=0` is a bit unintuitive to work with
https://api.github.com/repos/pandas-dev/pandas/pulls/52171
2023-03-24T17:59:27Z
2023-03-24T20:35:57Z
2023-03-24T20:35:57Z
2023-03-24T20:36:06Z
Backport PR #52161 on branch 2.0.x (Docs/update issue 52106)
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 7d678c60a2737..f2869b1779b52 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -2543,7 +2543,6 @@ cdef class MonthEnd(MonthOffset): DateOffset of one month end. MonthEnd goes to the next date which is an end of the month. - To get the end of the current month pass the parameter n equals 0. See Also -------- @@ -2559,10 +2558,10 @@ cdef class MonthEnd(MonthOffset): >>> ts + pd.offsets.MonthEnd() Timestamp('2022-02-28 00:00:00') - If you want to get the end of the current month pass the parameter n equals 0: + If you want to get the end of the current month: >>> ts = pd.Timestamp(2022, 1, 31) - >>> ts + pd.offsets.MonthEnd(0) + >>> pd.offsets.MonthEnd().rollforward(ts) Timestamp('2022-01-31 00:00:00') """ _period_dtype_code = PeriodDtypeCode.M @@ -2589,7 +2588,6 @@ cdef class BusinessMonthEnd(MonthOffset): DateOffset increments between the last business day of the month. BusinessMonthEnd goes to the next date which is the last business day of the month. - To get the last business day of the current month pass the parameter n equals 0. Examples -------- @@ -2601,11 +2599,10 @@ cdef class BusinessMonthEnd(MonthOffset): >>> ts + pd.offsets.BMonthEnd() Timestamp('2022-12-30 00:00:00') - If you want to get the end of the current business month - pass the parameter n equals 0: + If you want to get the end of the current business month: >>> ts = pd.Timestamp(2022, 11, 30) - >>> ts + pd.offsets.BMonthEnd(0) + >>> pd.offsets.BMonthEnd().rollforward(ts) Timestamp('2022-11-30 00:00:00') """ _prefix = "BM"
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52170
2023-03-24T17:22:38Z
2023-03-24T20:33:53Z
2023-03-24T20:33:52Z
2023-03-24T20:33:53Z
DEPR: _metadata propagation
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index efc8bc695df85..f8e0ec05bd5c0 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -236,6 +236,7 @@ Deprecations - Deprecated :func:`is_interval_dtype`, check ``isinstance(dtype, pd.IntervalDtype)`` instead (:issue:`52607`) - Deprecated :meth:`DataFrame.applymap`. Use the new :meth:`DataFrame.map` method instead (:issue:`52353`) - Deprecated :meth:`DataFrame.swapaxes` and :meth:`Series.swapaxes`, use :meth:`DataFrame.transpose` or :meth:`Series.transpose` instead (:issue:`51946`) +- Deprecated ``_metadata`` propagation (:issue:`51280`) - Deprecated ``freq`` parameter in :class:`PeriodArray` constructor, pass ``dtype`` instead (:issue:`52462`) - Deprecated behavior of :func:`concat` when :class:`DataFrame` has columns that are all-NA, in a future version these will not be discarded when determining the resulting dtype (:issue:`40893`) - Deprecated behavior of :meth:`Series.dt.to_pydatetime`, in a future version this will return a :class:`Series` containing python ``datetime`` objects instead of an ``ndarray`` of datetimes; this matches the behavior of other :meth:`Series.dt` properties (:issue:`20306`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f3de296841510..03cdb2d47be26 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6057,6 +6057,14 @@ def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) + if name != "_name": + warnings.warn( + "_metadata propagation is deprecated and will be removed " + "in a future version. 
To retain the old behavior, " + "override __finalize__ in a subclass.", + FutureWarning, + stacklevel=find_stack_level(), + ) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": @@ -6111,6 +6119,12 @@ def __setattr__(self, name: str, value) -> None: if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: + warnings.warn( + "_metadata handling is deprecated and will be removed in " + "a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) object.__setattr__(self, name, value) else: try: diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 090b3d64e7c41..d77ff8ddbdd84 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -2069,12 +2069,18 @@ def _constructor(self): def _constructor_sliced(self): return SubclassedSeries - sdf = SubclassedDataFrame("some_data", {"A": [1, 2, 3], "B": [4, 5, 6]}) - result = sdf * 2 - expected = SubclassedDataFrame("some_data", {"A": [2, 4, 6], "B": [8, 10, 12]}) + msg = "_metadata handling is deprecated" + + with tm.assert_produces_warning(FutureWarning, match=msg): + sdf = SubclassedDataFrame("some_data", {"A": [1, 2, 3], "B": [4, 5, 6]}) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = sdf * 2 + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = SubclassedDataFrame("some_data", {"A": [2, 4, 6], "B": [8, 10, 12]}) tm.assert_frame_equal(result, expected) - result = sdf + sdf + with tm.assert_produces_warning(FutureWarning, match=msg): + result = sdf + sdf tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 5c44a957b9373..e4eec8d1ddbb9 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import pytest @@ -12,6 +14,8 @@ ) import pandas._testing 
as tm +warn_msg = "_metadata propagation is deprecated" + @pytest.fixture() def gpd_style_subclass_df(): @@ -80,22 +84,30 @@ def custom_frame_function(self): assert isinstance(cdf_multi2["A"], CustomSeries) def test_dataframe_metadata(self): + setattr_msg = "_metadata handling is deprecated" + df = tm.SubclassedDataFrame( {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"] ) - df.testattr = "XXX" + with tm.assert_produces_warning(FutureWarning, match=setattr_msg): + df.testattr = "XXX" assert df.testattr == "XXX" - assert df[["X"]].testattr == "XXX" - assert df.loc[["a", "b"], :].testattr == "XXX" - assert df.iloc[[0, 1], :].testattr == "XXX" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert df[["X"]].testattr == "XXX" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert df.loc[["a", "b"], :].testattr == "XXX" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert df.iloc[[0, 1], :].testattr == "XXX" # see gh-9776 - assert df.iloc[0:1, :].testattr == "XXX" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert df.iloc[0:1, :].testattr == "XXX" # see gh-10553 unpickled = tm.round_trip_pickle(df) - tm.assert_frame_equal(df, unpickled) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(df, unpickled) assert df._metadata == unpickled._metadata assert df.testattr == unpickled.testattr @@ -104,32 +116,38 @@ def test_indexing_sliced(self): df = tm.SubclassedDataFrame( {"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"] ) - res = df.loc[:, "X"] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.loc[:, "X"] exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) - res = df.iloc[:, 1] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.iloc[:, 1] exp = tm.SubclassedSeries([4, 5, 6], 
index=list("abc"), name="Y") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) - res = df.loc[:, "Z"] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.loc[:, "Z"] exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) - res = df.loc["a", :] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.loc["a", :] exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) - res = df.iloc[1, :] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.iloc[1, :] exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) - res = df.loc["c", :] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.loc["c", :] exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) @@ -153,7 +171,8 @@ def test_subclass_align(self): {"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD") ) - res1, res2 = df1.align(df2, axis=0) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res1, res2 = df1.align(df2, axis=0) exp1 = tm.SubclassedDataFrame( {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, index=list("ABCDE"), @@ -163,15 +182,20 @@ def test_subclass_align(self): index=list("ABCDE"), ) assert isinstance(res1, tm.SubclassedDataFrame) - tm.assert_frame_equal(res1, exp1) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res1, exp1) assert isinstance(res2, tm.SubclassedDataFrame) - tm.assert_frame_equal(res2, exp2) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res2, exp2) - res1, res2 = df1.a.align(df2.c) + with 
tm.assert_produces_warning(FutureWarning, match=warn_msg): + res1, res2 = df1.a.align(df2.c) assert isinstance(res1, tm.SubclassedSeries) - tm.assert_series_equal(res1, exp1.a) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_series_equal(res1, exp1.a) assert isinstance(res2, tm.SubclassedSeries) - tm.assert_series_equal(res2, exp2.c) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_series_equal(res2, exp2.c) def test_subclass_align_combinations(self): # GH 12983 @@ -179,7 +203,8 @@ def test_subclass_align_combinations(self): s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x") # frame + series - res1, res2 = df.align(s, axis=0) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res1, res2 = df.align(s, axis=0) exp1 = tm.SubclassedDataFrame( {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, index=list("ABCDE"), @@ -190,23 +215,27 @@ def test_subclass_align_combinations(self): ) assert isinstance(res1, tm.SubclassedDataFrame) - tm.assert_frame_equal(res1, exp1) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res1, exp1) assert isinstance(res2, tm.SubclassedSeries) tm.assert_series_equal(res2, exp2) # series + frame - res1, res2 = s.align(df) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res1, res2 = s.align(df) assert isinstance(res1, tm.SubclassedSeries) tm.assert_series_equal(res1, exp2) assert isinstance(res2, tm.SubclassedDataFrame) - tm.assert_frame_equal(res2, exp1) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res2, exp1) def test_subclass_iterrows(self): # GH 13977 df = tm.SubclassedDataFrame({"a": [1]}) - for i, row in df.iterrows(): - assert isinstance(row, tm.SubclassedSeries) - tm.assert_series_equal(row, df.loc[i]) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + for i, row in df.iterrows(): + assert isinstance(row, 
tm.SubclassedSeries) + tm.assert_series_equal(row, df.loc[i]) def test_subclass_stack(self): # GH 15564 @@ -216,7 +245,8 @@ def test_subclass_stack(self): columns=["X", "Y", "Z"], ) - res = df.stack() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.stack() exp = tm.SubclassedSeries( [1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")] ) @@ -252,12 +282,15 @@ def test_subclass_stack_multi(self): ), columns=Index(["W", "X"], name="www"), ) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.stack() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) - res = df.stack() - tm.assert_frame_equal(res, exp) - - res = df.stack("yyy") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.stack("yyy") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame( [ @@ -277,8 +310,10 @@ def test_subclass_stack_multi(self): columns=Index(["y", "z"], name="yyy"), ) - res = df.stack("www") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.stack("www") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) def test_subclass_stack_multi_mixed(self): # GH 15564 @@ -314,12 +349,15 @@ def test_subclass_stack_multi_mixed(self): ), columns=Index(["W", "X"], name="www"), ) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.stack() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) - res = df.stack() - tm.assert_frame_equal(res, exp) - - res = df.stack("yyy") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.stack("yyy") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + 
tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame( [ @@ -339,8 +377,10 @@ def test_subclass_stack_multi_mixed(self): columns=Index(["y", "z"], name="yyy"), ) - res = df.stack("www") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.stack("www") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) def test_subclass_unstack(self): # GH 15564 @@ -350,7 +390,8 @@ def test_subclass_unstack(self): columns=["X", "Y", "Z"], ) - res = df.unstack() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.unstack() exp = tm.SubclassedSeries( [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list("XXXYYYZZZ"), list("abcabcabc")] ) @@ -378,11 +419,15 @@ def test_subclass_unstack_multi(self): ), ) - res = df.unstack() - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.unstack() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) - res = df.unstack("ccc") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.unstack("ccc") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame( [[10, 30, 11, 31, 12, 32, 13, 33], [20, 40, 21, 41, 22, 42, 23, 43]], @@ -393,8 +438,10 @@ def test_subclass_unstack_multi(self): ), ) - res = df.unstack("aaa") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.unstack("aaa") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) def test_subclass_unstack_multi_mixed(self): # GH 15564 @@ -425,11 +472,15 @@ def test_subclass_unstack_multi_mixed(self): ), ) - res = df.unstack() - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.unstack() 
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) - res = df.unstack("ccc") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.unstack("ccc") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame( [ @@ -443,8 +494,10 @@ def test_subclass_unstack_multi_mixed(self): ), ) - res = df.unstack("aaa") - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = df.unstack("aaa") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) def test_subclass_pivot(self): # GH 15564 @@ -456,7 +509,8 @@ def test_subclass_pivot(self): } ) - pivoted = df.pivot(index="index", columns="columns", values="values") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + pivoted = df.pivot(index="index", columns="columns", values="values") expected = tm.SubclassedDataFrame( { @@ -467,7 +521,8 @@ def test_subclass_pivot(self): expected.index.name, expected.columns.name = "index", "columns" - tm.assert_frame_equal(pivoted, expected) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(pivoted, expected) def test_subclassed_melt(self): # GH 15564 @@ -480,7 +535,8 @@ def test_subclassed_melt(self): } ) - melted = pd.melt(cheese, id_vars=["first", "last"]) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + melted = pd.melt(cheese, id_vars=["first", "last"]) expected = tm.SubclassedDataFrame( [ @@ -492,7 +548,8 @@ def test_subclassed_melt(self): columns=["first", "last", "variable", "value"], ) - tm.assert_frame_equal(melted, expected) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(melted, expected) def test_subclassed_wide_to_long(self): # GH 9762 @@ -518,10 +575,13 @@ def test_subclassed_wide_to_long(self): "id": 
[0, 1, 2, 0, 1, 2], } expected = tm.SubclassedDataFrame(exp_data) - expected = expected.set_index(["id", "year"])[["X", "A", "B"]] - long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year") - tm.assert_frame_equal(long_frame, expected) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(long_frame, expected) def test_subclassed_apply(self): # GH 19822 @@ -544,8 +604,10 @@ def stretch(row): columns=["first", "last", "variable", "value"], ) - df.apply(lambda x: check_row_subclass(x)) - df.apply(lambda x: check_row_subclass(x), axis=1) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + df.apply(lambda x: check_row_subclass(x)) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + df.apply(lambda x: check_row_subclass(x), axis=1) expected = tm.SubclassedDataFrame( [ @@ -557,23 +619,30 @@ def stretch(row): columns=["first", "last", "variable", "value"], ) - result = df.apply(lambda x: stretch(x), axis=1) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.apply(lambda x: stretch(x), axis=1) assert isinstance(result, tm.SubclassedDataFrame) - tm.assert_frame_equal(result, expected) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(result, expected) expected = tm.SubclassedDataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) - result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1) assert isinstance(result, tm.SubclassedDataFrame) - tm.assert_frame_equal(result, expected) + with tm.assert_produces_warning(FutureWarning, 
match=warn_msg): + tm.assert_frame_equal(result, expected) - result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand") assert isinstance(result, tm.SubclassedDataFrame) - tm.assert_frame_equal(result, expected) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(result, expected) expected = tm.SubclassedSeries([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) - result = df.apply(lambda x: [1, 2, 3], axis=1) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.apply(lambda x: [1, 2, 3], axis=1) assert not isinstance(result, tm.SubclassedDataFrame) tm.assert_series_equal(result, expected) @@ -581,7 +650,8 @@ def test_subclassed_reductions(self, all_reductions): # GH 25596 df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = getattr(df, all_reductions)() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = getattr(df, all_reductions)() assert isinstance(result, tm.SubclassedSeries) def test_subclassed_count(self): @@ -592,11 +662,13 @@ def test_subclassed_count(self): "Single": [False, True, True, True, False], } ) - result = df.count() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.count() assert isinstance(result, tm.SubclassedSeries) df = tm.SubclassedDataFrame({"A": [1, 0, 3], "B": [0, 5, 6], "C": [7, 8, 0]}) - result = df.count() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.count() assert isinstance(result, tm.SubclassedSeries) df = tm.SubclassedDataFrame( @@ -608,23 +680,27 @@ def test_subclassed_count(self): list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] ), ) - result = df.count() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.count() assert isinstance(result, tm.SubclassedSeries) df = 
tm.SubclassedDataFrame() - result = df.count() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.count() assert isinstance(result, tm.SubclassedSeries) def test_isin(self): df = tm.SubclassedDataFrame( {"num_legs": [2, 4], "num_wings": [2, 0]}, index=["falcon", "dog"] ) - result = df.isin([0, 2]) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.isin([0, 2]) assert isinstance(result, tm.SubclassedDataFrame) def test_duplicated(self): df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = df.duplicated() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.duplicated() assert isinstance(result, tm.SubclassedSeries) df = tm.SubclassedDataFrame() @@ -634,26 +710,33 @@ def test_duplicated(self): @pytest.mark.parametrize("idx_method", ["idxmax", "idxmin"]) def test_idx(self, idx_method): df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = getattr(df, idx_method)() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = getattr(df, idx_method)() assert isinstance(result, tm.SubclassedSeries) def test_dot(self): df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) s = tm.SubclassedSeries([1, 1, 2, 1]) - result = df.dot(s) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.dot(s) assert isinstance(result, tm.SubclassedSeries) df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) s = tm.SubclassedDataFrame([1, 1, 2, 1]) - result = df.dot(s) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.dot(s) assert isinstance(result, tm.SubclassedDataFrame) def test_memory_usage(self): df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = df.memory_usage() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.memory_usage() assert isinstance(result, tm.SubclassedSeries) - result 
= df.memory_usage(index=False) + with warnings.catch_warnings(): + # this only warns in the CoW build + warnings.filterwarnings("ignore") + result = df.memory_usage(index=False) assert isinstance(result, tm.SubclassedSeries) @td.skip_if_no_scipy @@ -666,7 +749,8 @@ def test_corrwith(self): df2 = tm.SubclassedDataFrame( np.random.randn(4, 4), index=index[:4], columns=columns ) - correls = df1.corrwith(df2, axis=1, drop=True, method="kendall") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + correls = df1.corrwith(df2, axis=1, drop=True, method="kendall") assert isinstance(correls, (tm.SubclassedSeries)) @@ -682,10 +766,12 @@ def test_asof(self): index=rng, ) - result = df.asof(rng[-2:]) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.asof(rng[-2:]) assert isinstance(result, tm.SubclassedDataFrame) - result = df.asof(rng[-2]) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.asof(rng[-2]) assert isinstance(result, tm.SubclassedSeries) result = df.asof("1989-12-31") @@ -695,20 +781,23 @@ def test_idxmin_preserves_subclass(self): # GH 28330 df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = df.idxmin() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.idxmin() assert isinstance(result, tm.SubclassedSeries) def test_idxmax_preserves_subclass(self): # GH 28330 df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = df.idxmax() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.idxmax() assert isinstance(result, tm.SubclassedSeries) def test_convert_dtypes_preserves_subclass(self, gpd_style_subclass_df): # GH 43668 df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = df.convert_dtypes() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.convert_dtypes() assert isinstance(result, 
tm.SubclassedDataFrame) result = gpd_style_subclass_df.convert_dtypes() @@ -718,7 +807,8 @@ def test_astype_preserves_subclass(self): # GH#40810 df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) - result = df.astype({"A": np.int64, "B": np.int32, "C": np.float64}) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.astype({"A": np.int64, "B": np.int32, "C": np.float64}) assert isinstance(result, tm.SubclassedDataFrame) def test_equals_subclass(self): @@ -732,7 +822,9 @@ def test_equals_subclass(self): def test_replace_list_method(self): # https://github.com/pandas-dev/pandas/pull/46018 df = tm.SubclassedDataFrame({"A": [0, 1, 2]}) - result = df.replace([1, 2], method="ffill") + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.replace([1, 2], method="ffill") expected = tm.SubclassedDataFrame({"A": [0, 0, 0]}) assert isinstance(result, tm.SubclassedDataFrame) - tm.assert_frame_equal(result, expected) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py index 79f055909fdea..70cf44558f9fe 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -118,8 +118,11 @@ def finalize(self, other, method=None, **kwargs): df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["a", "b"]) df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["c", "d"]) DataFrame._metadata = ["filename"] - df1.filename = "fname1.csv" - df2.filename = "fname2.csv" + msg = "_metadata handling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df1.filename = "fname1.csv" + with tm.assert_produces_warning(FutureWarning, match=msg): + df2.filename = "fname2.csv" result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner") assert result.filename == "fname1.csv|fname2.csv" @@ -127,7 +130,8 @@ def finalize(self, 
other, method=None, **kwargs): # concat # GH#6927 df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list("ab")) - df1.filename = "foo" + with tm.assert_produces_warning(FutureWarning, match=msg): + df1.filename = "foo" result = pd.concat([df1, df1]) assert result.filename == "foo+foo" diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py index ee0a7fb77f336..c2a6f1e65a39c 100644 --- a/pandas/tests/generic/test_series.py +++ b/pandas/tests/generic/test_series.py @@ -122,6 +122,7 @@ def test_metadata_propagation_indiv_resample(self): def test_metadata_propagation_indiv(self, monkeypatch): # check that the metadata matches up on the resulting ops + msg = "_metadata handling is deprecated" ser = Series(range(3), range(3)) ser.name = "foo" @@ -151,8 +152,10 @@ def finalize(self, other, method=None, **kwargs): m.setattr(Series, "_metadata", ["name", "filename"]) m.setattr(Series, "__finalize__", finalize) - ser.filename = "foo" - ser2.filename = "bar" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser.filename = "foo" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser2.filename = "bar" result = pd.concat([ser, ser2]) assert result.filename == "foo+bar" diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index 601e67bbca5e3..bff6ada3947da 100644 --- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -28,12 +28,38 @@ def test_groupby_preserves_subclass(obj, groupby_func): grouped = obj.groupby(np.arange(0, 10)) # Groups should preserve subclass type - assert isinstance(grouped.get_group(0), type(obj)) + msg = "_metadata propagation is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grp = grouped.get_group(0) + assert isinstance(grp, type(obj)) args = get_groupby_method_args(groupby_func, obj) - result1 = getattr(grouped, groupby_func)(*args) - result2 = 
grouped.agg(groupby_func, *args) + warn = None + if groupby_func in [ + "fillna", + "diff", + "sum", + "pct_change", + "shift", + "prod", + "mean", + "median", + "first", + "last", + "max", + "min", + "idxmax", + "idxmin", + "corrwith", + ]: + warn = FutureWarning + if groupby_func == "nunique" and obj.ndim == 2: + warn = FutureWarning + with tm.assert_produces_warning(warn, match=msg): + result1 = getattr(grouped, groupby_func)(*args) + with tm.assert_produces_warning(warn, match=msg): + result2 = grouped.agg(groupby_func, *args) # Reduction or transformation kernels should preserve type slices = {"ngroup", "cumcount", "size"} @@ -44,18 +70,24 @@ def test_groupby_preserves_subclass(obj, groupby_func): # Confirm .agg() groupby operations return same results if isinstance(result1, DataFrame): - tm.assert_frame_equal(result1, result2) + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_frame_equal(result1, result2) else: tm.assert_series_equal(result1, result2) def test_groupby_preserves_metadata(): # GH-37343 + set_msg = "_metadata handling is deprecated" + warn_msg = "_metadata propagation is deprecated" + custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]}) assert "testattr" in custom_df._metadata - custom_df.testattr = "hello" - for _, group_df in custom_df.groupby("c"): - assert group_df.testattr == "hello" + with tm.assert_produces_warning(FutureWarning, match=set_msg): + custom_df.testattr = "hello" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + for _, group_df in custom_df.groupby("c"): + assert group_df.testattr == "hello" # GH-45314 def func(group): @@ -63,8 +95,7 @@ def func(group): assert hasattr(group, "testattr") return group.testattr - msg = "DataFrameGroupBy.apply operated on the grouping columns" - with tm.assert_produces_warning(FutureWarning, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): result = custom_df.groupby("c").apply(func) expected = 
tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c")) tm.assert_series_equal(result, expected) @@ -75,10 +106,13 @@ def func2(group): return group.testattr custom_series = tm.SubclassedSeries([1, 2, 3]) - custom_series.testattr = "hello" - result = custom_series.groupby(custom_df["c"]).apply(func2) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + custom_series.testattr = "hello" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = custom_series.groupby(custom_df["c"]).apply(func2) tm.assert_series_equal(result, expected) - result = custom_series.groupby(custom_df["c"]).agg(func2) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = custom_series.groupby(custom_df["c"]).agg(func2) tm.assert_series_equal(result, expected) @@ -100,7 +134,10 @@ def test_groupby_resample_preserves_subclass(obj): ], } ) - df = df.set_index("Date") + msg = "_metadata propagation is deprecated" + warn = None if obj is DataFrame else FutureWarning + with tm.assert_produces_warning(warn, match=msg): + df = df.set_index("Date") # Confirm groupby.resample() preserves dataframe type msg = "DataFrameGroupBy.resample operated on the grouping columns" diff --git a/pandas/tests/io/pytables/test_subclass.py b/pandas/tests/io/pytables/test_subclass.py index 823d2875c5417..958bb981507b0 100644 --- a/pandas/tests/io/pytables/test_subclass.py +++ b/pandas/tests/io/pytables/test_subclass.py @@ -17,6 +17,9 @@ class TestHDFStoreSubclass: # GH 33748 + + # _metadata warning only shows up on ArrayManager build + @pytest.mark.filterwarnings("ignore") def test_supported_for_subclass_dataframe(self, tmp_path): data = {"a": [1, 2], "b": [3, 4]} sdf = tm.SubclassedDataFrame(data, dtype=np.intp) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index be53209d889ee..bd8bee5c84fbd 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -86,8 +86,12 @@ 
def _constructor(self): opname = all_arithmetic_operators op = getattr(Series, opname) m = MySeries([1, 2, 3], name="test") - m.x = 42 - result = op(m, 1) + msg = "_metadata handling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + m.x = 42 + msg = "_metadata propagation is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = op(m, 1) assert result.x == 42 def test_flex_add_scalar_fill_value(self): diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py index a3550c6de6780..16d29f67f9a77 100644 --- a/pandas/tests/series/test_subclass.py +++ b/pandas/tests/series/test_subclass.py @@ -4,6 +4,8 @@ import pandas as pd import pandas._testing as tm +warn_msg = "_metadata propagation is deprecated" + class TestSeriesSubclassing: @pytest.mark.parametrize( @@ -15,15 +17,18 @@ class TestSeriesSubclassing: ) def test_indexing_sliced(self, idx_method, indexer, exp_data, exp_idx): s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd")) - res = getattr(s, idx_method)[indexer] + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = getattr(s, idx_method)[indexer] exp = tm.SubclassedSeries(exp_data, index=list(exp_idx)) tm.assert_series_equal(res, exp) def test_to_frame(self): s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"), name="xxx") - res = s.to_frame() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = s.to_frame() exp = tm.SubclassedDataFrame({"xxx": [1, 2, 3, 4]}, index=list("abcd")) - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) def test_subclass_unstack(self): # GH 15564 @@ -32,7 +37,8 @@ def test_subclass_unstack(self): res = s.unstack() exp = tm.SubclassedDataFrame({"x": [1, 3], "y": [2, 4]}, index=["a", "b"]) - tm.assert_frame_equal(res, exp) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + tm.assert_frame_equal(res, exp) 
def test_subclass_empty_repr(self): sub_series = tm.SubclassedSeries() @@ -41,9 +47,11 @@ def test_subclass_empty_repr(self): def test_asof(self): N = 3 rng = pd.date_range("1/1/1990", periods=N, freq="53s") + s = tm.SubclassedSeries({"A": [np.nan, np.nan, np.nan]}, index=rng) - result = s.asof(rng[-2:]) + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = s.asof(rng[-2:]) assert isinstance(result, tm.SubclassedSeries) def test_explode(self):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. xref #51280
https://api.github.com/repos/pandas-dev/pandas/pulls/52168
2023-03-24T16:25:36Z
2023-05-04T15:33:16Z
null
2023-05-05T14:03:50Z
DOC link to pandas-coverage app
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index 9178032c31371..9d26e77082452 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -812,7 +812,8 @@ install pandas) by typing:: your installation is probably fine and you can start contributing! Often it is worth running only a subset of tests first around your changes before running the -entire suite. +entire suite (tip: you can use the [pandas-coverage app](https://pandas-coverage.herokuapp.com/) +to find out which tests hit the lines of code you've modified, and then run only those). The easiest way to do this is with::
As suggested on Slack @Dr-Irv
https://api.github.com/repos/pandas-dev/pandas/pulls/52163
2023-03-24T15:08:41Z
2023-03-24T16:55:47Z
2023-03-24T16:55:47Z
2023-03-24T16:55:57Z
WEB: remove links to pandas-governance + assorted cleanups
diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst index 994dfde0894f3..97de0fb343223 100644 --- a/doc/source/development/maintaining.rst +++ b/doc/source/development/maintaining.rst @@ -174,6 +174,8 @@ conversation is over. It's typically best to give the reporter some time to respond or self-close their issue if it's determined that the behavior is not a bug, or the feature is out of scope. Sometimes reporters just go away though, and we'll close the issue after the conversation has died. +If you think an issue should be closed but are not completely sure, please apply +the "closing candidate" label and wait for other maintainers to take a look. .. _maintaining.reviewing: @@ -252,14 +254,16 @@ Cleaning up old pull requests Occasionally, contributors are unable to finish off a pull request. If some time has passed (two weeks, say) since the last review requesting changes, gently ask if they're still interested in working on this. If another two weeks or -so passes with no response, thank them for their work and close the pull request. -Comment on the original issue that "There's a stalled PR at #1234 that may be -helpful.", and perhaps label the issue as "Good first issue" if the PR was relatively -close to being accepted. +so passes with no response, thank them for their work and then either: -Additionally, core-team members can push to contributors branches. This can be -helpful for pushing an important PR across the line, or for fixing a small -merge conflict. +- close the pull request; +- push to the contributor's branch to push their work over the finish line (if + you're part of ``pandas-core``). This can be helpful for pushing an important PR + across the line, or for fixing a small merge conflict. 
+ +If closing the pull request, then please comment on the original issue that +"There's a stalled PR at #1234 that may be helpful.", and perhaps label the issue +as "Good first issue" if the PR was relatively close to being accepted. Becoming a pandas maintainer ---------------------------- @@ -276,12 +280,13 @@ The required steps for adding a maintainer are: * ``pandas-core`` is for core team members * ``pandas-triage`` is for pandas triage members +If adding to ``pandas-core``, there are two additional steps: + 3. Add the contributor to the pandas Google group. 4. Create a pull request to add the contributor's GitHub handle to ``pandas-dev/pandas/web/pandas/config.yml``. -5. Create a pull request to add the contributor's name/GitHub handle to the `governance document <https://github.com/pandas-dev/pandas-governance/blob/master/people.md>`_. The current list of core-team members is at -https://github.com/pandas-dev/pandas-governance/blob/master/people.md +https://github.com/pandas-dev/pandas/blob/main/web/pandas/config.yml .. _maintaining.merging: @@ -496,5 +501,5 @@ Post-Release - Twitter, Mastodon and Telegram -.. _governance documents: https://github.com/pandas-dev/pandas-governance +.. _governance documents: https://github.com/pandas-dev/pandas/blob/main/web/pandas/about/governance.md .. 
_list of permissions: https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization diff --git a/doc/source/getting_started/overview.rst b/doc/source/getting_started/overview.rst index 8c1be6d9a08c1..05a7d63b7ff47 100644 --- a/doc/source/getting_started/overview.rst +++ b/doc/source/getting_started/overview.rst @@ -154,7 +154,7 @@ project and makes it possible to `donate <https://pandas.pydata.org/donate.html> Project governance ------------------ -The governance process that pandas project has used informally since its inception in 2008 is formalized in `Project Governance documents <https://github.com/pandas-dev/pandas-governance>`__. +The governance process that pandas project has used informally since its inception in 2008 is formalized in `Project Governance documents <https://github.com/pandas-dev/pandas/blob/main/web/pandas/about/governance.md>`__. The documents clarify how decisions are made and how the various elements of our community interact, including the relationship between open source collaborative development and work that may be funded by for-profit or non-profit entities. Wes McKinney is the Benevolent Dictator for Life (BDFL). @@ -162,7 +162,7 @@ Wes McKinney is the Benevolent Dictator for Life (BDFL). Development team ----------------- -The list of the Core Team members and more detailed information can be found on the `people’s page <https://github.com/pandas-dev/pandas-governance/blob/master/people.md>`__ of the governance repo. +The list of the Core Team members and more detailed information can be found on the `pandas website <https://pandas.pydata.org/about/team.html>`__. Institutional partners
null
https://api.github.com/repos/pandas-dev/pandas/pulls/52162
2023-03-24T14:31:29Z
2023-03-31T17:34:10Z
2023-03-31T17:34:10Z
2023-03-31T17:34:19Z
DOC: update examples in MonthBegin/MonthEnd to use rollbackward/rollforward
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 0e2ac692e579c..ff068921545c5 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -2546,7 +2546,6 @@ cdef class MonthEnd(MonthOffset): DateOffset of one month end. MonthEnd goes to the next date which is an end of the month. - To get the end of the current month pass the parameter n equals 0. See Also -------- @@ -2562,10 +2561,10 @@ cdef class MonthEnd(MonthOffset): >>> ts + pd.offsets.MonthEnd() Timestamp('2022-02-28 00:00:00') - If you want to get the end of the current month pass the parameter n equals 0: + If you want to get the end of the current month: >>> ts = pd.Timestamp(2022, 1, 31) - >>> ts + pd.offsets.MonthEnd(0) + >>> pd.offsets.MonthEnd().rollforward(ts) Timestamp('2022-01-31 00:00:00') """ _period_dtype_code = PeriodDtypeCode.M @@ -2578,7 +2577,6 @@ cdef class MonthBegin(MonthOffset): DateOffset of one month at beginning. MonthBegin goes to the next date which is a start of the month. - To get the start of the current month pass the parameter n equals 0. See Also -------- @@ -2594,10 +2592,10 @@ cdef class MonthBegin(MonthOffset): >>> ts + pd.offsets.MonthBegin() Timestamp('2023-01-01 00:00:00') - If you want to get the start of the current month pass the parameter n equals 0: + If you want to get the start of the current month: >>> ts = pd.Timestamp(2022, 12, 1) - >>> ts + pd.offsets.MonthBegin(0) + >>> pd.offsets.MonthBegin().rollback(ts) Timestamp('2022-12-01 00:00:00') """ _prefix = "MS" @@ -2609,7 +2607,6 @@ cdef class BusinessMonthEnd(MonthOffset): DateOffset increments between the last business day of the month. BusinessMonthEnd goes to the next date which is the last business day of the month. - To get the last business day of the current month pass the parameter n equals 0. 
Examples -------- @@ -2621,11 +2618,10 @@ cdef class BusinessMonthEnd(MonthOffset): >>> ts + pd.offsets.BMonthEnd() Timestamp('2022-12-30 00:00:00') - If you want to get the end of the current business month - pass the parameter n equals 0: + If you want to get the end of the current business month: >>> ts = pd.Timestamp(2022, 11, 30) - >>> ts + pd.offsets.BMonthEnd(0) + >>> pd.offsets.BMonthEnd().rollforward(ts) Timestamp('2022-11-30 00:00:00') """ _prefix = "BM" @@ -2637,8 +2633,7 @@ cdef class BusinessMonthBegin(MonthOffset): DateOffset of one month at the first business day. BusinessMonthBegin goes to the next date which is the first business day - of the month. To get the first business day of the current month pass - the parameter n equals 0. + of the month. Examples -------- @@ -2650,11 +2645,10 @@ cdef class BusinessMonthBegin(MonthOffset): >>> ts + pd.offsets.BMonthBegin() Timestamp('2023-01-02 00:00:00') - If you want to get the start of the current business month pass - the parameter n equals 0: + If you want to get the start of the current business month: >>> ts = pd.Timestamp(2022, 12, 1) - >>> ts + pd.offsets.BMonthBegin(0) + >>> pd.offsets.BMonthBegin().rollback(ts) Timestamp('2022-12-01 00:00:00') """ _prefix = "BMS"
- [ ] closes #52106 updated docstring of pandas.offsets.MonthBegin, pandas.offsets.MonthEnd, pandas.offsets.BMonthBegin, pandas.offsets.BMonthEnd the output of the `validate_docsctrings.py` is similar to the below output as follows ``` ################################################################################ #################### Docstring (pandas.offsets.MonthBegin) #################### ################################################################################ DateOffset of one month at beginning. Examples -------- >>> ts = pd.Timestamp(2022, 1, 1) >>> ts + pd.offsets.MonthBegin() Timestamp('2022-02-01 00:00:00') ################################################################################ ################################## Validation ################################## ################################################################################ 2 Errors found: No extended summary found See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/52161
2023-03-24T14:14:35Z
2023-03-24T17:00:58Z
2023-03-24T17:00:58Z
2023-04-11T21:43:57Z
Backport PR #52111 on branch 2.0.x (PERF: DatetimeIndex comparison with Timestamp mismatched resos)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index b8fca76115446..dc7db33faec99 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -959,10 +959,17 @@ def _cmp_method(self, other, op): if not isinstance(other, type(self)): # i.e. Timedelta/Timestamp, cast to ndarray and let # compare_mismatched_resolutions handle broadcasting - other_arr = np.array(other.asm8) + try: + # GH#52080 see if we can losslessly cast to shared unit + other = other.as_unit(self.unit, round_ok=False) + except ValueError: + other_arr = np.array(other.asm8) + return compare_mismatched_resolutions( + self._ndarray, other_arr, op + ) else: other_arr = other._ndarray - return compare_mismatched_resolutions(self._ndarray, other_arr, op) + return compare_mismatched_resolutions(self._ndarray, other_arr, op) other_vals = self._unbox(other) # GH#37462 comparison on i8 values is almost 2x faster than M8/m8
Backport PR #52111: PERF: DatetimeIndex comparison with Timestamp mismatched resos
https://api.github.com/repos/pandas-dev/pandas/pulls/52160
2023-03-24T12:17:23Z
2023-03-24T16:06:42Z
2023-03-24T16:06:42Z
2023-03-24T16:06:42Z
STYLE: Enable TCH for nanops.py
diff --git a/pyproject.toml b/pyproject.toml index 1bc530df74e87..000bdea47c55c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -297,8 +297,6 @@ exclude = [ "pandas/io/*" = ["PLW2901"] "pandas/tests/*" = ["PLW2901"] "pandas/plotting/*" = ["PLW2901"] -# TCH to be enabled gradually -"pandas/core/nanops.py" = ["TCH"] # Keep this one enabled "pandas/_typing.py" = ["TCH"]
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). closes #51740
https://api.github.com/repos/pandas-dev/pandas/pulls/52159
2023-03-24T10:21:41Z
2023-03-24T11:31:23Z
2023-03-24T11:31:23Z
2023-03-24T12:48:55Z
DOC Fix EX01 in docstrings - added examples
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index b3ca4e213aea9..45df480779ee7 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -86,8 +86,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then MSG='Partially validate docstrings (EX01)' ; echo $MSG $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01 --ignore_functions \ pandas.Series.index \ - pandas.Series.hasnans \ - pandas.Series.to_list \ pandas.Series.__iter__ \ pandas.Series.keys \ pandas.Series.item \ @@ -309,7 +307,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then pandas_object \ pandas.api.interchange.from_dataframe \ pandas.Index.values \ - pandas.Index.hasnans \ pandas.Index.dtype \ pandas.Index.inferred_type \ pandas.Index.shape \ diff --git a/pandas/core/base.py b/pandas/core/base.py index 96209ba97c0aa..d085807981fa8 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -801,6 +801,12 @@ def tolist(self): -------- numpy.ndarray.tolist : Return the array as an a.ndim-levels deep nested list of Python scalars. 
+ + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.to_list() + [1, 2, 3] """ return self._values.tolist() @@ -835,6 +841,18 @@ def hasnans(self) -> bool: Returns ------- bool + + Examples + -------- + >>> s = pd.Series([1, 2, 3, None]) + >>> s + 0 1.0 + 1 2.0 + 2 3.0 + 3 NaN + dtype: float64 + >>> s.hasnans + True """ # error: Item "bool" of "Union[bool, ndarray[Any, dtype[bool_]], NDFrame]" # has no attribute "any" diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 273b42d725a91..c93eb0fe3def6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2714,6 +2714,17 @@ def hasnans(self) -> bool: Returns ------- bool + + Examples + -------- + >>> s = pd.Series([1, 2, 3], index=['a', 'b', None]) + >>> s + a 1 + b 2 + None 3 + dtype: int64 + >>> s.index.hasnans + True """ if self._can_hold_na: return bool(self._isnan.any()) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 017721b8a4ee0..c7d80a705b2e4 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -99,4 +99,3 @@ def test_hasnans_uncached_for_series(): assert not hasattr(ser, "_cache") ser.iloc[-1] = np.nan assert ser.hasnans is True - assert Series.hasnans.__doc__ == Index.hasnans.__doc__
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Towards https://github.com/pandas-dev/pandas/issues/37875
https://api.github.com/repos/pandas-dev/pandas/pulls/52158
2023-03-24T09:45:17Z
2023-03-25T13:21:32Z
2023-03-25T13:21:32Z
2023-03-25T13:57:30Z
PERF: Make __iadd__ actually inplace
diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py index 4fd9740f184c8..9eaca050a7fa9 100644 --- a/asv_bench/benchmarks/arithmetic.py +++ b/asv_bench/benchmarks/arithmetic.py @@ -31,6 +31,7 @@ class IntFrameWithScalar: [np.float64, np.int64], [2, 3.0, np.int32(4), np.float64(5)], [ + operator.iadd, operator.add, operator.sub, operator.mul, diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index 1d10d797866f4..ee7c65e06e705 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -185,6 +185,9 @@ def __add__(self, other): """ return self._arith_method(other, operator.add) + def __iadd__(self, other): + return self._arith_method(other, operator.iadd, inplace=True) + @unpack_zerodim_and_defer("__radd__") def __radd__(self, other): return self._arith_method(other, roperator.radd) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index f1df86788ac44..aa2797c8c2cb7 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -615,7 +615,8 @@ def _propagate_mask( # expected "ndarray[Any, dtype[bool_]]") return mask # type: ignore[return-value] - def _arith_method(self, other, op): + def _arith_method(self, other, op, inplace: bool = False): + # TODO: Make sure inplace handled correctly here op_name = op.__name__ omask = None diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 4c32be4849706..dd49f2dd2bc59 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -70,6 +70,7 @@ from pandas.core.arrays.sparse.dtype import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com +from pandas.core.computation.expressions import _inplace_ops from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, @@ -1701,7 +1702,12 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): # Ops # 
------------------------------------------------------------------------ - def _arith_method(self, other, op): + def _arith_method(self, other, op, inplace: bool = False): + if op in _inplace_ops: + # TODO: Do ops actually inplace on SparseArray + # Replace the inplace op with the normal op + op = _inplace_ops[op] # + op_name = op.__name__ if isinstance(other, SparseArray): diff --git a/pandas/core/base.py b/pandas/core/base.py index b281dace12fcb..e24310d169908 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1223,7 +1223,7 @@ def drop_duplicates(self, *, keep: DropKeep = "first"): def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: return algorithms.duplicated(self._values, keep=keep) - def _arith_method(self, other, op): + def _arith_method(self, other, op, inplace: bool = False): res_name = ops.get_op_result_name(self, other) lvalues = self._values @@ -1234,7 +1234,10 @@ def _arith_method(self, other, op): rvalues = np.arange(rvalues.start, rvalues.stop, rvalues.step) with np.errstate(all="ignore"): - result = ops.arithmetic_op(lvalues, rvalues, op) + result = ops.arithmetic_op(lvalues, rvalues, op, inplace) + if inplace and result is lvalues: + # inplace case + return self return self._construct_result(result, name=res_name) diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 6219cac4aeb16..82caec985da91 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -64,7 +64,7 @@ def set_numexpr_threads(n=None) -> None: ne.set_num_threads(n) -def _evaluate_standard(op, op_str, a, b): +def _evaluate_standard(op, op_str, a, b, inplace): """ Standard evaluation. 
""" @@ -92,7 +92,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check) -> bool: return False -def _evaluate_numexpr(op, op_str, a, b): +def _evaluate_numexpr(op, op_str, a, b, inplace: bool = False): result = None if _can_use_numexpr(op, op_str, a, b, "evaluate"): @@ -109,6 +109,7 @@ def _evaluate_numexpr(op, op_str, a, b): f"a_value {op_str} b_value", local_dict={"a_value": a_value, "b_value": b_value}, casting="safe", + out=a_value if inplace else None, ) except TypeError: # numexpr raises eg for array ** array with integers @@ -128,13 +129,21 @@ def _evaluate_numexpr(op, op_str, a, b): _store_test_result(result is not None) if result is None: - result = _evaluate_standard(op, op_str, a, b) + result = _evaluate_standard(op, op_str, a, b, inplace) return result +# Inplace ops and their fallbacks, in-case +# the operation actually can't be done inplace +_inplace_ops = {operator.iadd: operator.add} + +# For inplace ops, we'll map to the regular variant +# however, we'll also use the out parameter to make it +# inplace _op_str_mapping = { operator.add: "+", + operator.iadd: "+", roperator.radd: "+", operator.mul: "*", roperator.rmul: "*", @@ -223,7 +232,7 @@ def _bool_arith_fallback(op_str, a, b) -> bool: return False -def evaluate(op, a, b, use_numexpr: bool = True): +def evaluate(op, a, b, use_numexpr: bool = True, inplace: bool = False): """ Evaluate and return the expression of the op on a and b. @@ -234,13 +243,27 @@ def evaluate(op, a, b, use_numexpr: bool = True): b : right operand use_numexpr : bool, default True Whether to try to use numexpr. + inplace: bool, default False + Whether to do the op inplace. + If False, will replace + inplace op with the non-inplace version of it. 
+ a must by an ndarray/EA in this case """ op_str = _op_str_mapping[op] + inplace = inplace and op in _inplace_ops + if inplace: + inplace = np.result_type(a, b) == a.dtype + + if op in _inplace_ops and not inplace: + # Replace with the non-inplace variant + # Don't need to change op_str since its the same for inplace/non-inplace + op = _inplace_ops[op] + if op_str is not None: if use_numexpr: # error: "None" not callable - return _evaluate(op, op_str, a, b) # type: ignore[misc] - return _evaluate_standard(op, op_str, a, b) + return _evaluate(op, op_str, a, b, inplace=inplace) # type: ignore[misc] + return _evaluate_standard(op, op_str, a, b, inplace=inplace) def where(cond, a, b, use_numexpr: bool = True): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 89be1c1da34f4..e180041ce21f2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -40,6 +40,7 @@ get_option, using_copy_on_write, ) +from pandas._config.config import option_context from pandas._libs import ( algos as libalgos, @@ -7420,23 +7421,35 @@ def _cmp_method(self, other, op): new_data = self._dispatch_frame_op(other, op, axis=axis) return self._construct_result(new_data) - def _arith_method(self, other, op): + def _arith_method(self, other, op, inplace: bool = False): if self._should_reindex_frame_op(other, op, 1, None, None): return self._arith_method_with_reindex(other, op) axis: Literal[1] = 1 # only relevant for Series other case other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],)) - self, other = self._align_for_op(other, axis, flex=True, level=None) + # Disable copy on write, since align will make a shallow copy + # (preventing it from going inplace when it should be able to) + # TODO: better way of doing this? 
+ with option_context("mode.copy_on_write", False): + left, other = self._align_for_op(other, axis, flex=True, level=None) with np.errstate(all="ignore"): - new_data = self._dispatch_frame_op(other, op, axis=axis) - return self._construct_result(new_data) + new_data = left._dispatch_frame_op(other, op, axis=axis, inplace=inplace) + if inplace: + # Need to reindex if not aligned correctly, if inplace requested + # even if we were not able to operate on the underlying arrays inplace + if not new_data._indexed_same(self): + new_data = new_data.reindex_like(self, copy=False) + self._update_inplace(new_data, verify_is_copy=False) + return self + + return left._construct_result(new_data) _logical_method = _arith_method def _dispatch_frame_op( - self, right, func: Callable, axis: AxisInt | None = None + self, right, func: Callable, axis: AxisInt | None = None, inplace: bool = False ) -> DataFrame: """ Evaluate the frame operation func(left, right) by evaluating @@ -7457,12 +7470,16 @@ def _dispatch_frame_op( Caller is responsible for setting np.errstate where relevant. """ # Get the appropriate array-op to apply to each column/block's values. - array_op = ops.get_array_op(func) + array_op = ops.get_array_op(func, inplace=inplace) right = lib.item_from_zerodim(right) if not is_list_like(right): # i.e. scalar, faster than checking np.ndim(right) == 0 - bm = self._mgr.apply(array_op, right=right) + bm = self._mgr.apply(array_op, right=right, block_inplace=inplace) + if inplace: + self._mgr = bm + return self + return self._constructor(bm) elif isinstance(right, DataFrame): @@ -7482,10 +7499,15 @@ def _dispatch_frame_op( # "BlockManager" right._mgr, # type: ignore[arg-type] array_op, + inplace=inplace, ) + if inplace: + self._mgr = bm + return self return self._constructor(bm) elif isinstance(right, Series) and axis == 1: + # TODO: Do arithmetic inplace here? 
# axis=1 means we want to operate row-by-row assert right.index.equals(self.columns) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b6d862ba2180c..31cc1e245cdf8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11708,11 +11708,6 @@ def _inplace_method(self, other, op) -> Self: ) return self - @final - def __iadd__(self, other) -> Self: - # error: Unsupported left operand type for + ("Type[NDFrame]") - return self._inplace_method(other, type(self).__add__) # type: ignore[operator] - @final def __isub__(self, other) -> Self: # error: Unsupported left operand type for - ("Type[NDFrame]") diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 7c02781b16456..a373fe313e87c 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -312,6 +312,7 @@ def apply( self, f, align_keys: list[str] | None = None, + block_inplace: bool = False, **kwargs, ) -> Self: """ @@ -322,6 +323,10 @@ def apply( f : str or callable Name of the Block method to apply. align_keys: List[str] or None, default None + block_inplace: + Whether to do the operation inplace. 
+ Note: This will always obey Copy on Write rules + (so a black sharing references will never be operated inplace on) **kwargs Keywords to pass to `f` @@ -338,6 +343,10 @@ def apply( aligned_args = {k: kwargs[k] for k in align_keys} for b in self.blocks: + if block_inplace: + # Check if inplace allowed according to CoW + if using_copy_on_write() and b.refs.has_reference(): + b = b.copy() if aligned_args: for k, obj in aligned_args.items(): if isinstance(obj, (ABCSeries, ABCDataFrame)): @@ -1532,11 +1541,13 @@ def reduce(self, func: Callable) -> Self: new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) return new_mgr - def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: + def operate_blockwise( + self, other: BlockManager, array_op, inplace: bool = False + ) -> BlockManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ - return operate_blockwise(self, other, array_op) + return operate_blockwise(self, other, array_op, inplace=inplace) def _equal_values(self: BlockManager, other: BlockManager) -> bool: """ diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py index 8434ed05571b7..baaa1598a93bf 100644 --- a/pandas/core/internals/ops.py +++ b/pandas/core/internals/ops.py @@ -6,6 +6,8 @@ NamedTuple, ) +from pandas._config import using_copy_on_write + from pandas.core.dtypes.common import is_1d_only_ea_dtype if TYPE_CHECKING: @@ -26,7 +28,7 @@ class BlockPairInfo(NamedTuple): def _iter_block_pairs( - left: BlockManager, right: BlockManager + left: BlockManager, right: BlockManager, inplace: bool = False ) -> Iterator[BlockPairInfo]: # At this point we have already checked the parent DataFrames for # assert rframe._indexed_same(lframe) @@ -35,6 +37,10 @@ def _iter_block_pairs( locs = blk.mgr_locs blk_vals = blk.values + if inplace: + # Check if we need to copy according to CoW + if using_copy_on_write() and blk.refs.has_reference(): + blk = blk.copy() left_ea = blk_vals.ndim == 1 rblks = 
right._slice_take_blocks_ax0(locs.indexer, only_slice=True) @@ -54,13 +60,15 @@ def _iter_block_pairs( def operate_blockwise( - left: BlockManager, right: BlockManager, array_op + left: BlockManager, right: BlockManager, array_op, inplace: bool = False ) -> BlockManager: # At this point we have already checked the parent DataFrames for # assert rframe._indexed_same(lframe) res_blks: list[Block] = [] - for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right): + for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs( + left, right, inplace + ): res_values = array_op(lvals, rvals) if ( left_ea diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index b39930da9f711..0dcd93c5842aa 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -188,7 +188,9 @@ def _masked_arith_op(x: np.ndarray, y, op): return result -def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False): +def _na_arithmetic_op( + left: np.ndarray, right, op, is_cmp: bool = False, inplace: bool = False +): """ Return the result of evaluating op on the passed in values. @@ -199,8 +201,15 @@ def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False): left : np.ndarray right : np.ndarray or scalar Excludes DataFrame, Series, Index, ExtensionArray. + op: object + The operator to call with left and right is_cmp : bool, default False If this a comparison operation. + inplace: bool, default False + If this operation should be done inplace. If not, if op is an inplace operator + e.g. iadd, it will be converted to the non-inplace version of the operator + (so iadd -> add). + This will not convert a non-inplace op to an inplace one, though. 
Returns ------- @@ -210,11 +219,14 @@ def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False): ------ TypeError : invalid operation """ - if isinstance(right, str): - # can never use numexpr - func = op - else: - func = partial(expressions.evaluate, op) + # Can never be inplace/use numexpr if right side is str + right_is_str = isinstance(right, str) + func = partial( + expressions.evaluate, + op, + use_numexpr=not right_is_str, + inplace=inplace and not right_is_str, + ) try: result = func(left, right) @@ -239,7 +251,7 @@ def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False): return missing.dispatch_fill_zeros(op, left, right, result) -def arithmetic_op(left: ArrayLike, right: Any, op): +def arithmetic_op(left: ArrayLike, right: Any, op, inplace: bool = False): """ Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... @@ -253,6 +265,12 @@ def arithmetic_op(left: ArrayLike, right: Any, op): Cannot be a DataFrame or Index. Series is *not* excluded. op : {operator.add, operator.sub, ...} Or one of the reversed variants from roperator. + inplace: boolean, default False + Whether operation should be done inplace. If not, if op + is an inplace operator(e.g. iadd) it will be converted + to the non-inplace version of it (e.g. iadd -> add) + + This will not convert a non-inplace op to an inplace one, though. 
Returns ------- @@ -282,7 +300,9 @@ def arithmetic_op(left: ArrayLike, right: Any, op): # error: Argument 1 to "_na_arithmetic_op" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" - res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type] + res_values = _na_arithmetic_op( + left, right, op, inplace=inplace + ) # type: ignore[arg-type] return res_values @@ -463,7 +483,7 @@ def fill_bool(x, left=None): return res_values -def get_array_op(op): +def get_array_op(op, inplace: bool = False): """ Return a binary array operation corresponding to the given operator op. @@ -493,6 +513,7 @@ def get_array_op(op): return partial(logical_op, op=op) elif op_name in { "add", + "iadd", "sub", "mul", "truediv", @@ -501,7 +522,7 @@ def get_array_op(op): "divmod", "pow", }: - return partial(arithmetic_op, op=op) + return partial(arithmetic_op, op=op, inplace=inplace) else: raise NotImplementedError(op_name) diff --git a/pandas/core/series.py b/pandas/core/series.py index 2b71eb4a9480d..ab3ef38ff88ce 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5558,9 +5558,32 @@ def _logical_method(self, other, op): res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) - def _arith_method(self, other, op): + def _arith_method(self, other, op, inplace: bool = False): + # We need do_inplace to keep track of whether + # op can be done inplace (according to CoW) + # Still need to track inplace if it can't, since + # we need to "fake" it being done inplace + do_inplace = inplace + if do_inplace: + if using_copy_on_write(): + do_inplace = self._mgr._has_no_reference() + self, other = self._align_for_op(other) - return base.IndexOpsMixin._arith_method(self, other, op) + res = base.IndexOpsMixin._arith_method(self, other, op, do_inplace) + if inplace: + if res is not self: + # Need to update arrays inplace with res, since the + # inplace operation couldn't be done directly 
+ # Delete cacher + self._reset_cacher() + # TODO: the reindex call is copied from _inplace_method in + # generic.py. There shouldn't be any alignment issues so + # this should be unnecessary here, right? + self._update_inplace( + res.reindex_like(self, copy=False), verify_is_copy=False + ) + return self + return res def _align_for_op(self, right, align_asobject: bool = False): """align lhs and rhs Series""" diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py index 7b8bc35f016b1..b8f8b6d5bfe56 100644 --- a/pandas/tests/copy_view/test_methods.py +++ b/pandas/tests/copy_view/test_methods.py @@ -1661,26 +1661,6 @@ def test_update_series(using_copy_on_write): tm.assert_series_equal(view, expected) -def test_inplace_arithmetic_series(): - ser = Series([1, 2, 3]) - data = get_array(ser) - ser *= 2 - assert np.shares_memory(get_array(ser), data) - tm.assert_numpy_array_equal(data, get_array(ser)) - - -def test_inplace_arithmetic_series_with_reference(using_copy_on_write): - ser = Series([1, 2, 3]) - ser_orig = ser.copy() - view = ser[:] - ser *= 2 - if using_copy_on_write: - assert not np.shares_memory(get_array(ser), get_array(view)) - tm.assert_series_equal(ser_orig, view) - else: - assert np.shares_memory(get_array(ser), get_array(view)) - - @pytest.mark.parametrize("copy", [True, False]) def test_transpose(using_copy_on_write, copy, using_array_manager): df = DataFrame({"a": [1, 2, 3], "b": 1}) diff --git a/pandas/tests/copy_view/test_operators.py b/pandas/tests/copy_view/test_operators.py new file mode 100644 index 0000000000000..b4d7319803374 --- /dev/null +++ b/pandas/tests/copy_view/test_operators.py @@ -0,0 +1,248 @@ +import operator + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + +arith_operators = [ + [operator.add, operator.iadd], + [operator.sub, operator.isub], + [operator.mul, operator.imul], + 
[operator.truediv, operator.itruediv], + [operator.floordiv, operator.ifloordiv], + [operator.pow, operator.ipow], +] + + +@pytest.mark.parametrize("op,inplace_op", arith_operators) +def test_inplace_arith_series_scalar(op, inplace_op): + ser = Series([2, 4, 6], dtype="float64") + data = get_array(ser) + expected = op(ser, 2) + inplace_op(ser, 2) + assert np.shares_memory(get_array(ser), data) + tm.assert_numpy_array_equal(data, get_array(ser)) + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize("op,inplace_op", arith_operators) +def test_inplace_arith_series_scalar_ref(using_copy_on_write, op, inplace_op): + ser = Series([2, 4, 6], dtype="float64") + ser1 = ser + ser_orig = ser.copy() + view = ser[:] + expected = op(ser, 2) + inplace_op(ser, 2) + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), get_array(view)) + tm.assert_series_equal(ser_orig, view) + else: + assert np.shares_memory(get_array(ser), get_array(view)) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert ser is ser1 + assert ser._mgr is ser1._mgr + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize("op,inplace_op", arith_operators) +def test_inplace_arith_series_series(op, inplace_op): + ser = Series([2, 4, 6], dtype="float64") + other = Series([4, 6, 8], dtype="float64") + data = get_array(ser) + expected = op(ser, other) + inplace_op(ser, other) + assert np.shares_memory(get_array(ser), data) + tm.assert_numpy_array_equal(data, get_array(ser)) + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize("op,inplace_op", arith_operators) +def test_inplace_arith_series_series_ref(using_copy_on_write, op, inplace_op): + ser = Series([2, 4, 6], dtype="float64") + other = Series([4, 6, 8], dtype="float64") + ser1 = ser + ser_orig = ser.copy() + view = ser[:] + expected = op(ser, other) + inplace_op(ser, other) + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), get_array(view)) + 
tm.assert_series_equal(ser_orig, view) + else: + assert np.shares_memory(get_array(ser), get_array(view)) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert ser is ser1 + assert ser._mgr is ser1._mgr + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize( + "op,inplace_op", + # TODO: Add support for operating inplace to the rest of the arithmetic operators + [[operator.add, operator.iadd]], +) +def test_inplace_arith_df_scalar(using_copy_on_write, op, inplace_op): + df = DataFrame( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float64"), + columns=["a", "b", "c"], + ) + # Values will all be 1 block + values = df._mgr.blocks[0].values + df1 = df + expected = op(df, 2) + inplace_op(df, 2) + for col in df.columns: + assert np.shares_memory(get_array(df, col), values) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert df is df1 + assert df._mgr is df1._mgr + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "op,inplace_op", + # TODO: Add support for operating inplace to the rest of the arithmetic operators + [[operator.add, operator.iadd]], +) +def test_inplace_arith_df_scalar_ref(using_copy_on_write, op, inplace_op): + df = DataFrame( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float64"), + columns=["a", "b", "c"], + ) + df_orig = df.copy() + # Values will all be 1 block + values = df._mgr.blocks[0].values + df1 = df + view = df[:] + expected = op(df, 2) + inplace_op(df, 2) + for col in df.columns: + if using_copy_on_write: + assert not np.shares_memory(get_array(df, col), values) + else: + assert np.shares_memory(get_array(df, col), values) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert df is df1 + assert df._mgr is df1._mgr + tm.assert_frame_equal(df, expected) + if using_copy_on_write: + tm.assert_frame_equal(view, df_orig) + + +@pytest.mark.parametrize( + "op,inplace_op", + # TODO: Add support 
for operating inplace to the rest of the arithmetic operators + [[operator.add, operator.iadd]], +) +def test_inplace_arith_df_series(using_copy_on_write, op, inplace_op): + df = DataFrame( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float64"), + columns=["a", "b", "c"], + ) + # Values will all be 1 block + values = df._mgr.blocks[0].values + df1 = df + ser = Series([1, 2, 3], index=["a", "b", "c"], dtype="float64") + expected = op(df, ser) + inplace_op(df, ser) + for col in df.columns: + assert np.shares_memory(get_array(df, col), values) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert df is df1 + assert df._mgr is df1._mgr + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "op,inplace_op", + # TODO: Add support for operating inplace to the rest of the arithmetic operators + [[operator.add, operator.iadd]], +) +def test_inplace_arith_df_series_ref(using_copy_on_write, op, inplace_op): + df = DataFrame( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float64"), + columns=["a", "b", "c"], + ) + df_orig = df.copy() + view = df[:] + # Values will all be 1 block + values = df._mgr.blocks[0].values + df1 = df + ser = Series([1, 2, 3], index=["a", "b", "c"], dtype="float64") + expected = op(df, ser) + inplace_op(df, ser) + for col in df.columns: + if using_copy_on_write: + assert not np.shares_memory(get_array(df, col), values) + else: + assert np.shares_memory(get_array(df, col), values) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert df is df1 + assert df._mgr is df1._mgr + tm.assert_frame_equal(df, expected) + if using_copy_on_write: + tm.assert_frame_equal(view, df_orig) + + +@pytest.mark.parametrize( + "op,inplace_op", + # TODO: Add support for operating inplace to the rest of the arithmetic operators + [[operator.add, operator.iadd]], +) +def test_inplace_arith_df_df(using_copy_on_write, op, inplace_op): + df = DataFrame( + np.array([[1, 2, 3], [4, 
5, 6], [7, 8, 9]], dtype="float64"), + columns=["a", "b", "c"], + ) + other = df.copy() + # Values will all be 1 block + values = df._mgr.blocks[0].values + df1 = df + expected = op(df, other) + inplace_op(df, other) + for col in df.columns: + assert np.shares_memory(get_array(df, col), values) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert df is df1 + assert df._mgr is df1._mgr + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "op,inplace_op", + # TODO: Add support for operating inplace to the rest of the arithmetic operators + [[operator.add, operator.iadd]], +) +def test_inplace_arith_df_df_ref(using_copy_on_write, op, inplace_op): + df = DataFrame( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float64"), + columns=["a", "b", "c"], + ) + other = DataFrame( + np.array([[2, 3, 4], [5, 6, 7], [8, 9, 10]], dtype="float64"), + columns=["a", "b", "c"], + ) + df_orig = df.copy() + view = df[:] + # Values will all be 1 block + values = df._mgr.blocks[0].values + df1 = df + expected = op(df, other) + inplace_op(df, other) + for col in df.columns: + if using_copy_on_write: + assert not np.shares_memory(get_array(df, col), values) + else: + assert np.shares_memory(get_array(df, col), values) + # Identity checks for the inplace op (check that an inplace op returns itself) + assert df is df1 + assert df._mgr is df1._mgr + tm.assert_frame_equal(df, expected) + if using_copy_on_write: + tm.assert_frame_equal(view, df_orig) diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index c71ceae762e67..6fbfc202fbbf9 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1717,6 +1717,7 @@ def test_inplace_ops_identity(self): s = s_orig.copy() s2 = s s += 1.5 + assert s is s2 tm.assert_series_equal(s, s2) tm.assert_series_equal(s_orig + 1.5, s) diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py 
index 3752c4b5f6917..0b45f3dfd61a3 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -51,6 +51,8 @@ "_arrow_dtype_mapping", "_global_config", "_chained_assignment_msg", + # TODO: Remove once SparseArray supports inplace operations (e.g. __iadd__) + "_inplace_ops", }
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Starting simple with just ``__iadd__`` for now, eventually want to expand to all arith/logical ops? when I'm feeling braver :). Everything except: - SparseArray - MaskedArray(maybe just needs tests?) - Some special case with DF+Series (I can't seem to hit this path normally, need to investigate more) should be implemented. I'll try to get those in a follow up. Hopefully, I'm not using abusing/breaking existing frame/manager plumbing. I've kinda been hackily inserting return self calls all over.
https://api.github.com/repos/pandas-dev/pandas/pulls/52155
2023-03-24T04:03:15Z
2023-08-01T17:20:39Z
null
2023-08-01T17:20:46Z
Revert "BLD: Add pyproject.toml to wheels"
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 7eae93a6a27e9..31ed5096991a6 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -173,8 +173,8 @@ jobs: pip install hypothesis>=6.34.2 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 cd .. # Not a good idea to test within the src tree python -c "import pandas; print(pandas.__version__); - pandas.test(extra_args=['-m not clipboard and not single_cpu and not slow and not network and not db', '-n 2', '--no-strict-data-files']); - pandas.test(extra_args=['-m not clipboard and single_cpu and not slow and not network and not db', '--no-strict-data-files'])" + pandas.test(extra_args=['-m not clipboard and not single_cpu and not slow and not network and not db', '-n 2']); + pandas.test(extra_args=['-m not clipboard and single_cpu and not slow and not network and not db'])" - uses: actions/upload-artifact@v3 with: name: sdist diff --git a/ci/test_wheels.py b/ci/test_wheels.py index d6f843d7b2c68..f861c1cbedcad 100644 --- a/ci/test_wheels.py +++ b/ci/test_wheels.py @@ -41,12 +41,10 @@ multi_args = [ "-m not clipboard and not single_cpu and not slow and not network and not db", "-n 2", - "--no-strict-data-files", ] pd.test(extra_args=multi_args) pd.test( extra_args=[ "-m not clipboard and single_cpu and not slow and not network and not db", - "--no-strict-data-files", ] ) diff --git a/ci/test_wheels_windows.bat b/ci/test_wheels_windows.bat index b8724d6d31cb5..6364169e53924 100644 --- a/ci/test_wheels_windows.bat +++ b/ci/test_wheels_windows.bat @@ -1,6 +1,6 @@ set test_command=import pandas as pd; print(pd.__version__); ^ -pd.test(extra_args=['-m not clipboard and not single_cpu and not slow and not network and not db', '--no-strict-data-files', '-n=2']); ^ -pd.test(extra_args=['-m not clipboard and single_cpu and not slow and not network and not db', '--no-strict-data-files']) +pd.test(extra_args=['-m not clipboard and not single_cpu and not slow and not network 
and not db', '-n 2']); ^ +pd.test(extra_args=['-m not clipboard and single_cpu and not slow and not network and not db']) python --version pip install pytz six numpy python-dateutil tzdata>=2022.1 diff --git a/pandas/conftest.py b/pandas/conftest.py index 68f3c575ee93d..95bb2078d151c 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -103,9 +103,9 @@ def pytest_addoption(parser) -> None: parser.addoption( - "--no-strict-data-files", - action="store_false", - help="Don't fail if a test is skipped for missing data file.", + "--strict-data-files", + action="store_true", + help="Fail if a test is skipped for missing data file.", ) @@ -1112,9 +1112,9 @@ def all_numeric_accumulations(request): @pytest.fixture def strict_data_files(pytestconfig): """ - Returns the configuration for the test setting `--no-strict-data-files`. + Returns the configuration for the test setting `--strict-data-files`. """ - return pytestconfig.getoption("--no-strict-data-files") + return pytestconfig.getoption("--strict-data-files") @pytest.fixture @@ -1134,7 +1134,7 @@ def datapath(strict_data_files: str) -> Callable[..., str]: Raises ------ ValueError - If the path doesn't exist and the --no-strict-data-files option is not set. + If the path doesn't exist and the --strict-data-files option is set. """ BASE_PATH = os.path.join(os.path.dirname(__file__), "tests") @@ -1143,7 +1143,7 @@ def deco(*args): if not os.path.exists(path): if strict_data_files: raise ValueError( - f"Could not find file {path} and --no-strict-data-files is not set." + f"Could not find file {path} and --strict-data-files is set." 
) pytest.skip(f"Could not find {path}.") return path diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 15fa10b9e4289..d31f617b9be15 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,6 +1,5 @@ import collections from functools import partial -import os import string import numpy as np @@ -175,12 +174,6 @@ def test_version_tag(): ) -def test_pyproject_present(): - # Check pyproject.toml is present(relevant for wheels) - pyproject_loc = os.path.join(os.path.dirname(__file__), "../../pyproject.toml") - assert os.path.exists(pyproject_loc) - - @pytest.mark.parametrize( "obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)] ) diff --git a/pyproject.toml b/pyproject.toml index 1bc530df74e87..04c87e3d10a86 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -125,9 +125,6 @@ include-package-data = true include = ["pandas", "pandas.*"] namespaces = false -[tool.setuptools.package-data] -pandas = ["../pyproject.toml"] - [tool.setuptools.exclude-package-data] "*" = ["*.c", "*.h"] @@ -409,7 +406,7 @@ disable = [ [tool.pytest.ini_options] # sync minversion with pyproject.toml & install.rst minversion = "7.0" -addopts = "--strict-markers --strict-config --capture=no --durations=30 --junitxml=test-data.xml" +addopts = "--strict-data-files --strict-markers --strict-config --capture=no --durations=30 --junitxml=test-data.xml" empty_parameter_set_mark = "fail_at_collect" xfail_strict = true testpaths = "pandas" @@ -419,7 +416,6 @@ doctest_optionflags = [ "ELLIPSIS", ] filterwarnings = [ - "error::_pytest.warning_types.PytestUnknownMarkWarning", "error:::pandas", "error::ResourceWarning", "error::pytest.PytestUnraisableExceptionWarning",
Reverts pandas-dev/pandas#50330 closes #52141 Was installing pyproject.toml in the wrong place, and there's no way to let configure the install location at least for setuptools. I guess we'll have to wait for meson builds to try to include pyproject.toml.
https://api.github.com/repos/pandas-dev/pandas/pulls/52154
2023-03-24T02:25:55Z
2023-03-24T17:03:57Z
2023-03-24T17:03:57Z
2023-03-24T17:04:11Z
DEPR: flags
diff --git a/doc/source/user_guide/duplicates.rst b/doc/source/user_guide/duplicates.rst index 7894789846ce8..3475dd0cea3f8 100644 --- a/doc/source/user_guide/duplicates.rst +++ b/doc/source/user_guide/duplicates.rst @@ -121,6 +121,7 @@ will be raised. .. ipython:: python :okexcept: + :okwarning: pd.Series([0, 1, 2], index=["a", "b", "b"]).set_flags(allows_duplicate_labels=False) @@ -128,6 +129,7 @@ This applies to both row and column labels for a :class:`DataFrame` .. ipython:: python :okexcept: + :okwarning: pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=["A", "B", "C"],).set_flags( allows_duplicate_labels=False @@ -137,6 +139,7 @@ This attribute can be checked or set with :attr:`~DataFrame.flags.allows_duplica which indicates whether that object can have duplicate labels. .. ipython:: python + :okwarning: df = pd.DataFrame({"A": [0, 1, 2, 3]}, index=["x", "y", "X", "Y"]).set_flags( allows_duplicate_labels=False @@ -148,6 +151,7 @@ which indicates whether that object can have duplicate labels. like ``allows_duplicate_labels`` set to some value .. ipython:: python + :okwarning: df2 = df.set_flags(allows_duplicate_labels=True) df2.flags.allows_duplicate_labels @@ -157,6 +161,7 @@ Or the property can just be set directly on the same object .. ipython:: python + :okwarning: df2.flags.allows_duplicate_labels = False df2.flags.allows_duplicate_labels @@ -193,6 +198,7 @@ operations. .. ipython:: python :okexcept: + :okwarning: s1 = pd.Series(0, index=["a", "b"]).set_flags(allows_duplicate_labels=False) s1 diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index d1481639ca5a0..dbfdd4c85862e 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -597,6 +597,7 @@ Other Deprecations - Deprecated :func:`pd.core.internals.api.make_block`, use public APIs instead (:issue:`40226`) - Deprecated :func:`read_gbq` and :meth:`DataFrame.to_gbq`. 
Use ``pandas_gbq.read_gbq`` and ``pandas_gbq.to_gbq`` instead https://pandas-gbq.readthedocs.io/en/latest/api.html (:issue:`55525`) - Deprecated :meth:`.DataFrameGroupBy.fillna` and :meth:`.SeriesGroupBy.fillna`; use :meth:`.DataFrameGroupBy.ffill`, :meth:`.DataFrameGroupBy.bfill` for forward and backward filling or :meth:`.DataFrame.fillna` to fill with a single value (or the Series equivalents) (:issue:`55718`) +- Deprecated :meth:`DataFrame.flags`, :meth:`DataFrame.set_flags`, :meth:`Series.flags`, :meth:`Series.set_flags` (:issue:`51280`) - Deprecated :meth:`DatetimeArray.__init__` and :meth:`TimedeltaArray.__init__`, use :func:`array` instead (:issue:`55623`) - Deprecated :meth:`Index.format`, use ``index.astype(str)`` or ``index.map(formatter)`` instead (:issue:`55413`) - Deprecated :meth:`Series.ravel`, the underlying array is already 1D, so ravel is not necessary (:issue:`52511`) diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index e342f76dc724b..48c92afde12f8 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -893,7 +893,9 @@ def assert_series_equal( raise_assert_detail(obj, "Series length are different", msg1, msg2) if check_flags: - assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + assert ( + left._flags == right._flags + ), f"{repr(left._flags)} != {repr(right._flags)}" if check_index: # GH #38183 @@ -1166,7 +1168,9 @@ def assert_frame_equal( ) if check_flags: - assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + assert ( + left._flags == right._flags + ), f"{repr(left._flags)} != {repr(right._flags)}" # index comparison assert_index_equal( diff --git a/pandas/conftest.py b/pandas/conftest.py index 983272d79081e..ef2a54b15bb22 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -168,6 +168,30 @@ def pytest_collection_modifyitems(items, config) -> None: "(Series|DataFrame).bool is now deprecated and will be removed " "in future 
version of pandas", ), + ( + "Flags", + "DataFrame.flags is deprecated and will be removed in a future version", + ), + ( + "flags", + "DataFrame.flags is deprecated and will be removed in a future version", + ), + ( + "allows_duplicate_labels", + "DataFrame.flags is deprecated and will be removed in a future version", + ), + ( + "set_flags", + "DataFrame.set_flags is deprecated and will be removed in a future version", + ), + ( + "DuplicateLabelError", + "Series.set_flags is deprecated and will be removed in a future version", + ), + ( + "DuplicateLabelError", + "Series.flags is deprecated and will be removed in a future version", + ), ( "pandas.core.generic.NDFrame.first", "first is deprecated and will be removed in a future version. " diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3e2e589440bd9..ed9e09d42a509 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5106,7 +5106,7 @@ def insert( """ if allow_duplicates is lib.no_default: allow_duplicates = False - if allow_duplicates and not self.flags.allows_duplicate_labels: + if allow_duplicates and not self._flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index de25a02c6b37c..3d7afb541095f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -440,6 +440,12 @@ def flags(self) -> Flags: False >>> df.flags["allows_duplicate_labels"] = True """ + warnings.warn( + f"{type(self).__name__}.flags is deprecated and will be removed " + "in a future version", + FutureWarning, + stacklevel=find_stack_level(), + ) return self._flags @final @@ -502,6 +508,13 @@ def set_flags( >>> df2.flags.allows_duplicate_labels False """ + warnings.warn( + f"{type(self).__name__}.set_flags is deprecated and will be removed " + "in a future version", + FutureWarning, + stacklevel=find_stack_level(), + ) + df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels @@ -2178,7 +2191,7 @@ def __getstate__(self) -> dict[str, Any]: "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, - "_flags": {k: self.flags[k] for k in self.flags._keys}, + "_flags": {k: self._flags[k] for k in self._flags._keys}, **meta, } @@ -4511,7 +4524,7 @@ def __delitem__(self, key) -> None: @final def _check_inplace_and_allows_duplicate_labels(self, inplace: bool_t): - if inplace and not self.flags.allows_duplicate_labels: + if inplace and not self._flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." @@ -6253,7 +6266,7 @@ def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: # of an empty dict is 50x more expensive than the empty check. self.attrs = deepcopy(other.attrs) - self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels + self._flags.allows_duplicate_labels = other._flags.allows_duplicate_labels # For subclasses using _metadata. 
for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) @@ -6269,9 +6282,9 @@ def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: self.attrs = deepcopy(attrs) allows_duplicate_labels = all( - x.flags.allows_duplicate_labels for x in other.objs + x._flags.allows_duplicate_labels for x in other.objs ) - self.flags.allows_duplicate_labels = allows_duplicate_labels + self._flags.allows_duplicate_labels = allows_duplicate_labels return self diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py index 862aebdc70a9d..5dfb789932f7e 100644 --- a/pandas/tests/copy_view/test_methods.py +++ b/pandas/tests/copy_view/test_methods.py @@ -122,8 +122,12 @@ def test_copy_shallow(using_copy_on_write, warn_copy_on_write): def test_methods_copy_keyword( request, method, copy, using_copy_on_write, using_array_manager ): + warn = None + msg = "(DataFrame|Series).set_flags is deprecated" index = None - if "to_timestamp" in request.node.callspec.id: + if "set_flags" in request.node.callspec.id: + warn = FutureWarning + elif "to_timestamp" in request.node.callspec.id: index = period_range("2012-01-01", freq="D", periods=3) elif "to_period" in request.node.callspec.id: index = date_range("2012-01-01", freq="D", periods=3) @@ -139,7 +143,8 @@ def test_methods_copy_keyword( with tm.assert_produces_warning(FutureWarning, match=msg): df2 = method(df, copy=copy) else: - df2 = method(df, copy=copy) + with tm.assert_produces_warning(warn, match=msg): + df2 = method(df, copy=copy) share_memory = using_copy_on_write or copy is False @@ -197,8 +202,12 @@ def test_methods_copy_keyword( ], ) def test_methods_series_copy_keyword(request, method, copy, using_copy_on_write): + warn = None + msg = "(DataFrame|Series).set_flags is deprecated" index = None - if "to_timestamp" in request.node.callspec.id: + if "set_flags" in request.node.callspec.id: + warn = FutureWarning + elif "to_timestamp" in 
request.node.callspec.id: index = period_range("2012-01-01", freq="D", periods=3) elif "to_period" in request.node.callspec.id: index = date_range("2012-01-01", freq="D", periods=3) @@ -216,7 +225,8 @@ def test_methods_series_copy_keyword(request, method, copy, using_copy_on_write) with tm.assert_produces_warning(FutureWarning, match=msg): ser2 = method(ser, copy=copy) else: - ser2 = method(ser, copy=copy) + with tm.assert_produces_warning(warn, match=msg): + ser2 = method(ser, copy=copy) share_memory = using_copy_on_write or copy is False @@ -1285,7 +1295,9 @@ def test_series_set_axis(using_copy_on_write): def test_set_flags(using_copy_on_write, warn_copy_on_write): ser = Series([1, 2, 3]) ser_orig = ser.copy() - ser2 = ser.set_flags(allows_duplicate_labels=False) + msg = "Series.set_flags is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser2 = ser.set_flags(allows_duplicate_labels=False) assert np.shares_memory(ser, ser2) diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index fbf36dbc4fb02..010a1bfc4866a 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -441,7 +441,9 @@ def test_reset_index_duplicate_columns_allow( ): # GH#44755 reset_index with duplicate column labels df = multiindex_df.rename_axis("A") - df = df.set_flags(allows_duplicate_labels=flag) + msg = "DataFrame.set_flags is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df = df.set_flags(allows_duplicate_labels=flag) if flag and allow_duplicates: result = df.reset_index(allow_duplicates=allow_duplicates) @@ -464,7 +466,9 @@ def test_reset_index_duplicate_columns_allow( @pytest.mark.parametrize("flag", [False, True]) def test_reset_index_duplicate_columns_default(self, multiindex_df, flag): df = multiindex_df.rename_axis("A") - df = df.set_flags(allows_duplicate_labels=flag) + msg = "DataFrame.set_flags is deprecated" + 
with tm.assert_produces_warning(FutureWarning, match=msg): + df = df.set_flags(allows_duplicate_labels=flag) msg = r"cannot insert \('A', ''\), already exists" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index c7b444045a0f2..0e536ffa8cf3a 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -334,19 +334,25 @@ def test_set_flags( obj = obj["A"] key = 0 - result = obj.set_flags(allows_duplicate_labels=allows_duplicate_labels) + set_msg = "(DataFrame|Series).set_flags is deprecated" + with tm.assert_produces_warning(FutureWarning, match=set_msg): + result = obj.set_flags(allows_duplicate_labels=allows_duplicate_labels) + get_msg = "(DataFrame|Series).flags is deprecated" if allows_duplicate_labels is None: # We don't update when it's not provided - assert result.flags.allows_duplicate_labels is True + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is True else: - assert result.flags.allows_duplicate_labels is allows_duplicate_labels + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is allows_duplicate_labels # We made a copy assert obj is not result # We didn't mutate obj - assert obj.flags.allows_duplicate_labels is True + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert obj.flags.allows_duplicate_labels is True # But we didn't copy data if frame_or_series is Series: @@ -365,9 +371,10 @@ def test_set_flags( result.iloc[key] = 1 # Now we do copy. 
- result = obj.set_flags( - copy=True, allows_duplicate_labels=allows_duplicate_labels - ) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + result = obj.set_flags( + copy=True, allows_duplicate_labels=allows_duplicate_labels + ) result.iloc[key] = 10 assert obj.iloc[key] == 1 @@ -387,6 +394,9 @@ def test_inspect_getmembers(self): df = DataFrame() msg = "DataFrame._data is deprecated" with tm.assert_produces_warning( - DeprecationWarning, match=msg, check_stacklevel=False + # FutureWarning is for DataFrame.flags + (FutureWarning, DeprecationWarning), + match=msg, + check_stacklevel=False, ): inspect.getmembers(df) diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index f54db07824daf..89b80a1e21f99 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -9,6 +9,9 @@ not_implemented = pytest.mark.xfail(reason="Not implemented.") +get_msg = "(DataFrame|Series).flags is deprecated" +set_msg = "(DataFrame|Series).set_flags is deprecated" + # ---------------------------------------------------------------------------- # Preservation @@ -25,10 +28,13 @@ class TestPreserves: ) def test_construction_ok(self, cls, data): result = cls(data) - assert result.flags.allows_duplicate_labels is True + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is True - result = cls(data).set_flags(allows_duplicate_labels=False) - assert result.flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=set_msg): + result = cls(data).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is False @pytest.mark.parametrize( "func", @@ -42,8 +48,12 @@ def test_construction_ok(self, cls, data): ], ) def test_preserved_series(self, func): - s = pd.Series([0, 1], index=["a", 
"b"]).set_flags(allows_duplicate_labels=False) - assert func(s).flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=set_msg): + s = pd.Series([0, 1], index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert func(s).flags.allows_duplicate_labels is False @pytest.mark.parametrize( "other", [pd.Series(0, index=["a", "b", "c"]), pd.Series(0, index=["a", "b"])] @@ -51,44 +61,56 @@ def test_preserved_series(self, func): # TODO: frame @not_implemented def test_align(self, other): - s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + s = pd.Series([0, 1], index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) a, b = s.align(other) - assert a.flags.allows_duplicate_labels is False - assert b.flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert a.flags.allows_duplicate_labels is False + assert b.flags.allows_duplicate_labels is False def test_preserved_frame(self): - df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ) - assert df.loc[["a"]].flags.allows_duplicate_labels is False - assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert df.loc[["a"]].flags.allows_duplicate_labels is False + assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False def test_to_frame(self): - ser = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False) - assert ser.to_frame().flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, 
match=set_msg): + ser = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert ser.to_frame().flags.allows_duplicate_labels is False @pytest.mark.parametrize("func", ["add", "sub"]) @pytest.mark.parametrize("frame", [False, True]) @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")]) def test_binops(self, func, other, frame): - df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) if frame: df = df.to_frame() if isinstance(other, pd.Series) and frame: other = other.to_frame() func = operator.methodcaller(func, other) - assert df.flags.allows_duplicate_labels is False - assert func(df).flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert df.flags.allows_duplicate_labels is False + assert func(df).flags.allows_duplicate_labels is False def test_preserve_getitem(self): - df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) - assert df[["A"]].flags.allows_duplicate_labels is False - assert df["A"].flags.allows_duplicate_labels is False - assert df.loc[0].flags.allows_duplicate_labels is False - assert df.loc[[0]].flags.allows_duplicate_labels is False - assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert df[["A"]].flags.allows_duplicate_labels is False + assert df["A"].flags.allows_duplicate_labels is False + assert df.loc[0].flags.allows_duplicate_labels is False + assert df.loc[[0]].flags.allows_duplicate_labels is False + assert df.loc[0, 
["A"]].flags.allows_duplicate_labels is False def test_ndframe_getitem_caching_issue( self, request, using_copy_on_write, warn_copy_on_write @@ -97,10 +119,12 @@ def test_ndframe_getitem_caching_issue( request.applymarker(pytest.mark.xfail(reason="Unclear behavior.")) # NDFrame.__getitem__ will cache the first df['A']. May need to # invalidate that cache? Update the cached entries? - df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False) - assert df["A"].flags.allows_duplicate_labels is False - df.flags.allows_duplicate_labels = True - assert df["A"].flags.allows_duplicate_labels is True + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert df["A"].flags.allows_duplicate_labels is False + df.flags.allows_duplicate_labels = True + assert df["A"].flags.allows_duplicate_labels is True @pytest.mark.parametrize( "objs, kwargs", @@ -160,30 +184,28 @@ def test_ndframe_getitem_caching_issue( ], ) def test_concat(self, objs, kwargs): - objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] + with tm.assert_produces_warning(FutureWarning, match=set_msg): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] result = pd.concat(objs, **kwargs) - assert result.flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is False @pytest.mark.parametrize( - "left, right, expected", + "left, right, should_set, expected", [ # false false false pytest.param( - pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), - pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags( - allows_duplicate_labels=False - ), + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + (True, True), False, marks=not_implemented, ), # false 
true false pytest.param( - pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ), + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]), pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + (True, False), False, marks=not_implemented, ), @@ -191,13 +213,22 @@ def test_concat(self, objs, kwargs): ( pd.DataFrame({"A": [0, 1]}, index=["a", "b"]), pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + (False, False), True, ), ], ) - def test_merge(self, left, right, expected): + def test_merge(self, left, right, should_set, expected): + should_set_left, should_set_right = should_set + if should_set_left: + with tm.assert_produces_warning(FutureWarning, match=set_msg): + left = left.set_flags(allows_duplicate_labels=False) + if should_set_right: + with tm.assert_produces_warning(FutureWarning, match=set_msg): + right = right.set_flags(allows_duplicate_labels=False) result = pd.merge(left, right, left_index=True, right_index=True) - assert result.flags.allows_duplicate_labels is expected + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is expected @not_implemented def test_groupby(self): @@ -206,9 +237,11 @@ def test_groupby(self): # - apply # - transform # - Should passing a grouper that disallows duplicates propagate? 
- df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False) result = df.groupby([0, 0, 1]).agg("count") - assert result.flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is False @pytest.mark.parametrize("frame", [True, False]) @not_implemented @@ -221,9 +254,10 @@ def test_window(self, frame): ) if frame: df = df.to_frame() - assert df.rolling(3).mean().flags.allows_duplicate_labels is False - assert df.ewm(3).mean().flags.allows_duplicate_labels is False - assert df.expanding(3).mean().flags.allows_duplicate_labels is False + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert df.rolling(3).mean().flags.allows_duplicate_labels is False + assert df.ewm(3).mean().flags.allows_duplicate_labels is False + assert df.expanding(3).mean().flags.allows_duplicate_labels is False # ---------------------------------------------------------------------------- @@ -242,11 +276,13 @@ class TestRaises: ) def test_set_flags_with_duplicates(self, cls, axes): result = cls(**axes) - assert result.flags.allows_duplicate_labels is True + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert result.flags.allows_duplicate_labels is True msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): - cls(**axes).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + cls(**axes).set_flags(allows_duplicate_labels=False) @pytest.mark.parametrize( "data", @@ -259,13 +295,18 @@ def test_set_flags_with_duplicates(self, cls, axes): def test_setting_allows_duplicate_labels_raises(self, data): msg = "Index has duplicates." 
with pytest.raises(pd.errors.DuplicateLabelError, match=msg): - data.flags.allows_duplicate_labels = False + with tm.assert_produces_warning(FutureWarning, match=get_msg): + data.flags.allows_duplicate_labels = False - assert data.flags.allows_duplicate_labels is True + with tm.assert_produces_warning(FutureWarning, match=get_msg): + assert data.flags.allows_duplicate_labels is True def test_series_raises(self): a = pd.Series(0, index=["a", "b"]) - b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + b = pd.Series([0, 1], index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.concat([a, b]) @@ -285,9 +326,10 @@ def test_series_raises(self): ], ) def test_getitem_raises(self, getter, target): - df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( - allows_duplicate_labels=False - ) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) if target: # df, df.loc, or df.iloc target = getattr(df, target) @@ -311,16 +353,18 @@ def test_getitem_raises(self, getter, target): ], ) def test_concat_raises(self, objs, kwargs): - objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] + with tm.assert_produces_warning(FutureWarning, match=set_msg): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] msg = "Index has duplicates." 
with pytest.raises(pd.errors.DuplicateLabelError, match=msg): pd.concat(objs, **kwargs) @not_implemented def test_merge_raises(self): - a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags( - allows_duplicate_labels=False - ) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags( + allows_duplicate_labels=False + ) b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"]) msg = "Index has duplicates." with pytest.raises(pd.errors.DuplicateLabelError, match=msg): @@ -344,14 +388,20 @@ def test_merge_raises(self): ) def test_raises_basic(idx): msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): - pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False) with pytest.raises(pd.errors.DuplicateLabelError, match=msg): - pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + pd.DataFrame({"A": [1, 1]}, index=idx).set_flags( + allows_duplicate_labels=False + ) with pytest.raises(pd.errors.DuplicateLabelError, match=msg): - pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False) def test_format_duplicate_labels_message(): @@ -374,7 +424,8 @@ def test_format_duplicate_labels_message_multi(): def test_dataframe_insert_raises(): - df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) msg = "Cannot specify" with pytest.raises(ValueError, match=msg): df.insert(0, "A", [3, 4], 
allow_duplicates=True) @@ -389,11 +440,13 @@ def test_dataframe_insert_raises(): ], ) def test_inplace_raises(method, frame_only): - df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags( - allows_duplicate_labels=False - ) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags( + allows_duplicate_labels=False + ) s = df["A"] - s.flags.allows_duplicate_labels = False + with tm.assert_produces_warning(FutureWarning, match=get_msg): + s.flags.allows_duplicate_labels = False msg = "Cannot specify" with pytest.raises(ValueError, match=msg): @@ -404,10 +457,12 @@ def test_inplace_raises(method, frame_only): def test_pickle(): - a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False) b = tm.round_trip_pickle(a) tm.assert_series_equal(a, b) - a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False) b = tm.round_trip_pickle(a) tm.assert_frame_equal(a, b) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 6564e381af0ea..547c60367fd42 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -490,9 +490,12 @@ def test_flags_identity(self, frame_or_series): if frame_or_series is DataFrame: obj = obj.to_frame() - assert obj.flags is obj.flags + msg = "(DataFrame|Series).flags is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.flags is obj.flags obj2 = obj.copy() - assert obj2.flags is not obj.flags + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj2.flags is not obj.flags def test_bool_dep(self) -> None: # GH-51749 diff --git a/pandas/tests/series/test_api.py 
b/pandas/tests/series/test_api.py index 29d6e2036476e..60156302a03c5 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -173,7 +173,10 @@ def test_inspect_getmembers(self): ser = Series(dtype=object) msg = "Series._data is deprecated" with tm.assert_produces_warning( - DeprecationWarning, match=msg, check_stacklevel=False + # FutureWarning is for Series.flags + (FutureWarning, DeprecationWarning), + match=msg, + check_stacklevel=False, ): inspect.getmembers(ser) diff --git a/pandas/tests/test_flags.py b/pandas/tests/test_flags.py index 9294b3fc3319b..d5a7179c0e652 100644 --- a/pandas/tests/test_flags.py +++ b/pandas/tests/test_flags.py @@ -1,12 +1,17 @@ import pytest import pandas as pd +import pandas._testing as tm + +set_msg = "DataFrame.set_flags is deprecated" +get_msg = "DataFrame.flags is deprecated" class TestFlags: def test_equality(self): - a = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags - b = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags + with tm.assert_produces_warning(FutureWarning, match=set_msg): + a = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags + b = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags assert a == a assert b == b @@ -14,29 +19,35 @@ def test_equality(self): assert a != 2 def test_set(self): - df = pd.DataFrame().set_flags(allows_duplicate_labels=True) - a = df.flags + with tm.assert_produces_warning(FutureWarning, match=set_msg): + df = pd.DataFrame().set_flags(allows_duplicate_labels=True) + with tm.assert_produces_warning(FutureWarning, match=get_msg): + a = df.flags a.allows_duplicate_labels = False assert a.allows_duplicate_labels is False a["allows_duplicate_labels"] = True assert a.allows_duplicate_labels is True def test_repr(self): - a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=True).flags) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + a = 
repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=True).flags) assert a == "<Flags(allows_duplicate_labels=True)>" - a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=False).flags) + with tm.assert_produces_warning(FutureWarning, match=set_msg): + a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=False).flags) assert a == "<Flags(allows_duplicate_labels=False)>" def test_obj_ref(self): df = pd.DataFrame() - flags = df.flags + with tm.assert_produces_warning(FutureWarning, match=get_msg): + flags = df.flags del df with pytest.raises(ValueError, match="object has been deleted"): flags.allows_duplicate_labels = True def test_getitem(self): df = pd.DataFrame() - flags = df.flags + with tm.assert_produces_warning(FutureWarning, match=get_msg): + flags = df.flags assert flags["allows_duplicate_labels"] is True flags["allows_duplicate_labels"] = False assert flags["allows_duplicate_labels"] is False diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index a074898f6046d..2aa634cb30250 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -270,7 +270,9 @@ def test_assert_frame_equal_datetime_like_dtype_mismatch(dtype): def test_allows_duplicate_labels(): left = DataFrame() - right = DataFrame().set_flags(allows_duplicate_labels=False) + msg = "DataFrame.set_flags is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + right = DataFrame().set_flags(allows_duplicate_labels=False) tm.assert_frame_equal(left, left) tm.assert_frame_equal(right, right) tm.assert_frame_equal(left, right, check_flags=False) @@ -316,10 +318,13 @@ def test_assert_frame_equal_check_like_different_indexes(): def test_assert_frame_equal_checking_allow_dups_flag(): # GH#45554 left = DataFrame([[1, 2], [3, 4]]) - left.flags.allows_duplicate_labels = False + msg = "DataFrame.flags is deprecated" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + left.flags.allows_duplicate_labels = False right = DataFrame([[1, 2], [3, 4]]) - right.flags.allows_duplicate_labels = True + with tm.assert_produces_warning(FutureWarning, match=msg): + right.flags.allows_duplicate_labels = True tm.assert_frame_equal(left, right, check_flags=False) with pytest.raises(AssertionError, match="allows_duplicate_labels"): diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index f722f619bc456..64d145e48d426 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -382,7 +382,9 @@ def test_assert_series_equal_ignore_extension_dtype_mismatch_cross_class(): def test_allows_duplicate_labels(): left = Series([1]) - right = Series([1]).set_flags(allows_duplicate_labels=False) + msg = "Series.set_flags is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + right = Series([1]).set_flags(allows_duplicate_labels=False) tm.assert_series_equal(left, left) tm.assert_series_equal(right, right) tm.assert_series_equal(left, right, check_flags=False)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. xref #51280
https://api.github.com/repos/pandas-dev/pandas/pulls/52153
2023-03-24T02:02:12Z
2024-01-09T22:02:24Z
null
2024-01-09T22:02:24Z
DEPR: attrs
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 644a7e86ec429..025ec1a79a95d 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -245,6 +245,7 @@ Deprecations - Deprecated :meth:`.Styler.applymap_index`. Use the new :meth:`.Styler.map_index` method instead (:issue:`52708`) - Deprecated :meth:`.Styler.applymap`. Use the new :meth:`.Styler.map` method instead (:issue:`52708`) - Deprecated :meth:`DataFrame.applymap`. Use the new :meth:`DataFrame.map` method instead (:issue:`52353`) +- Deprecated :meth:`DataFrame.attrs`, :meth:`Series.attrs`, to retain the old attribute propagation override ``__finalize__`` in a subclass (:issue:`51280`) - Deprecated :meth:`DataFrame.swapaxes` and :meth:`Series.swapaxes`, use :meth:`DataFrame.transpose` or :meth:`Series.transpose` instead (:issue:`51946`) - Deprecated ``freq`` parameter in :class:`PeriodArray` constructor, pass ``dtype`` instead (:issue:`52462`) - Deprecated behavior of :func:`concat` when :class:`DataFrame` has columns that are all-NA, in a future version these will not be discarded when determining the resulting dtype (:issue:`40893`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b6d862ba2180c..8f833c4e103b4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -338,12 +338,25 @@ def attrs(self) -> dict[Hashable, Any]: -------- DataFrame.flags : Global flags applying to this object. 
""" + warnings.warn( + f"{type(self).__name__}.attrs is deprecated and will be removed " + "in a future version", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Hashable, Any]) -> None: + warnings.warn( + f"{type(self).__name__}.attrs is deprecated and will be removed " + "in a future version", + FutureWarning, + stacklevel=find_stack_level(), + ) self._attrs = dict(value) @final @@ -2058,7 +2071,7 @@ def __getstate__(self) -> dict[str, Any]: "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, - "attrs": self.attrs, + "_attrs": self._attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } @@ -6058,8 +6071,8 @@ def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: stable across pandas releases. """ if isinstance(other, NDFrame): - for name in other.attrs: - self.attrs[name] = other.attrs[name] + for name in other._attrs: + self._attrs[name] = other._attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. 
@@ -6068,11 +6081,11 @@ def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": - attrs = other.objs[0].attrs - check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) + attrs = other.objs[0]._attrs + check_attrs = all(objs._attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: - self.attrs[name] = attrs[name] + self._attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 08546f03cee69..96af7960c62d1 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -801,10 +801,13 @@ def test_astype_noncontiguous(self, index_slice): def test_astype_retain_attrs(self, any_numpy_dtype): # GH#44414 df = DataFrame({"a": [0, 1, 2], "b": [3, 4, 5]}) - df.attrs["Location"] = "Michigan" + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.attrs["Location"] = "Michigan" - result = df.astype({"a": any_numpy_dtype}).attrs - expected = df.attrs + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.astype({"a": any_numpy_dtype}).attrs + expected = df.attrs tm.assert_dict_equal(expected, result) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index e0d9d6c281fd5..f77ba4223d0f6 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -312,11 +312,14 @@ async def test_tab_complete_warning(self, ip, frame_or_series): def test_attrs(self): df = DataFrame({"A": [2, 3]}) - assert df.attrs == {} - df.attrs["version"] = 1 + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert df.attrs == {} + df.attrs["version"] = 1 result = df.rename(columns=str) - assert result.attrs == 
{"version": 1} + with tm.assert_produces_warning(FutureWarning, match=msg): + assert result.attrs == {"version": 1} @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None]) def test_set_flags( diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index d6c4dff055748..cf60d450b8825 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -8,6 +8,7 @@ import pytest import pandas as pd +import pandas._testing as tm # TODO: # * Binary methods (mul, div, etc.) @@ -446,19 +447,26 @@ def test_finalize_called(ndframe_method): cls, init_args, method = ndframe_method ndframe = cls(*init_args) - ndframe.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + ndframe.attrs = {"a": 1} result = method(ndframe) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} @not_implemented_mark def test_finalize_called_eval_numexpr(): pytest.importorskip("numexpr") + warn_msg = "(DataFrame|Series).attrs is deprecated" + df = pd.DataFrame({"A": [1, 2]}) - df.attrs["A"] = 1 + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + df.attrs["A"] = 1 result = df.eval("A + 1", engine="numexpr") - assert result.attrs == {"A": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"A": 1} # ---------------------------------------------------------------------------- @@ -480,13 +488,18 @@ def test_finalize_called_eval_numexpr(): ], ids=lambda x: f"({type(x[0]).__name__},{type(x[1]).__name__})", ) +@pytest.mark.filterwarnings("ignore:Series.attrs is deprecated:FutureWarning") def test_binops(request, args, annotate, all_binary_operators): # This generates 624 tests... Is that needed? 
left, right = args + + warn_msg = "(DataFrame|Series).attrs is deprecated" if isinstance(left, (pd.DataFrame, pd.Series)): - left.attrs = {} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + left.attrs = {} if isinstance(right, (pd.DataFrame, pd.Series)): - right.attrs = {} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + right.attrs = {} if annotate == "left" and isinstance(left, int): pytest.skip("left is an int and doesn't support .attrs") @@ -541,9 +554,11 @@ def test_binops(request, args, annotate, all_binary_operators): ) ) if annotate in {"left", "both"} and not isinstance(left, int): - left.attrs = {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + left.attrs = {"a": 1} if annotate in {"right", "both"} and not isinstance(right, int): - right.attrs = {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + right.attrs = {"a": 1} is_cmp = all_binary_operators in [ operator.eq, @@ -560,7 +575,8 @@ def test_binops(request, args, annotate, all_binary_operators): right, left = right.align(left, axis=1, copy=False) result = all_binary_operators(left, right) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} # ---------------------------------------------------------------------------- @@ -622,9 +638,12 @@ def test_binops(request, args, annotate, all_binary_operators): ) def test_string_method(method): s = pd.Series(["a1"]) - s.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s.attrs = {"a": 1} result = method(s.str) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} @pytest.mark.parametrize( @@ -644,9 +663,12 @@ def test_string_method(method): ) def test_datetime_method(method): s = pd.Series(pd.date_range("2000", periods=4)) - 
s.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s.attrs = {"a": 1} result = method(s.dt) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} @pytest.mark.parametrize( @@ -681,9 +703,12 @@ def test_datetime_method(method): ) def test_datetime_property(attr): s = pd.Series(pd.date_range("2000", periods=4)) - s.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s.attrs = {"a": 1} result = getattr(s.dt, attr) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} @pytest.mark.parametrize( @@ -691,17 +716,23 @@ def test_datetime_property(attr): ) def test_timedelta_property(attr): s = pd.Series(pd.timedelta_range("2000", periods=4)) - s.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s.attrs = {"a": 1} result = getattr(s.dt, attr) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} @pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")]) def test_timedelta_methods(method): s = pd.Series(pd.timedelta_range("2000", periods=4)) - s.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s.attrs = {"a": 1} result = method(s.dt) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} @pytest.mark.parametrize( @@ -721,9 +752,12 @@ def test_timedelta_methods(method): @not_implemented_mark def test_categorical_accessor(method): s = pd.Series(["a", "b"], dtype="category") - s.attrs = {"a": 1} + 
warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + s.attrs = {"a": 1} result = method(s.cat) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} # ---------------------------------------------------------------------------- @@ -744,9 +778,12 @@ def test_categorical_accessor(method): ], ) def test_groupby_finalize(obj, method): - obj.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + obj.attrs = {"a": 1} result = method(obj.groupby([0, 0], group_keys=False)) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} @pytest.mark.parametrize( @@ -766,9 +803,12 @@ def test_groupby_finalize(obj, method): ) @not_implemented_mark def test_groupby_finalize_not_implemented(obj, method): - obj.attrs = {"a": 1} + warn_msg = "(DataFrame|Series).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + obj.attrs = {"a": 1} result = method(obj.groupby([0, 0])) - assert result.attrs == {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + assert result.attrs == {"a": 1} def test_finalize_frame_series_name(): diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py index d393ba6fd3957..841a5fa72adc0 100644 --- a/pandas/tests/interchange/test_impl.py +++ b/pandas/tests/interchange/test_impl.py @@ -71,7 +71,10 @@ def test_categorical_dtype(data): desc_cat["categories"]._col, pd.Series(["a", "d", "e", "s", "t"]) ) - tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = from_dataframe(df.__dataframe__()) + tm.assert_frame_equal(df, res) def test_categorical_pyarrow(): @@ 
-81,7 +84,9 @@ def test_categorical_pyarrow(): arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"] table = pa.table({"weekday": pa.array(arr).dictionary_encode()}) exchange_df = table.__dataframe__() - result = from_dataframe(exchange_df) + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = from_dataframe(exchange_df) weekday = pd.Categorical( arr, categories=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] ) @@ -96,7 +101,9 @@ def test_large_string_pyarrow(): arr = ["Mon", "Tue"] table = pa.table({"weekday": pa.array(arr, "large_string")}) exchange_df = table.__dataframe__() - result = from_dataframe(exchange_df) + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = from_dataframe(exchange_df) expected = pd.DataFrame({"weekday": ["Mon", "Tue"]}) tm.assert_frame_equal(result, expected) @@ -122,7 +129,9 @@ def test_bitmasks_pyarrow(offset, length, expected_values): arr = [3.3, None, 2.1] table = pa.table({"arr": arr}).slice(offset, length) exchange_df = table.__dataframe__() - result = from_dataframe(exchange_df) + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = from_dataframe(exchange_df) expected = pd.DataFrame({"arr": expected_values}) tm.assert_frame_equal(result, expected) @@ -146,12 +155,15 @@ def test_dataframe(data): indices = (0, 2) names = tuple(list(data.keys())[idx] for idx in indices) - result = from_dataframe(df2.select_columns(indices)) - expected = from_dataframe(df2.select_columns_by_name(names)) + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = from_dataframe(df2.select_columns(indices)) + expected = from_dataframe(df2.select_columns_by_name(names)) tm.assert_frame_equal(result, expected) - assert isinstance(result.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) - assert 
isinstance(expected.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isinstance(result.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) + assert isinstance(expected.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) def test_missing_from_masked(): @@ -249,7 +261,10 @@ def test_datetime(): assert col.dtype[0] == DtypeKind.DATETIME assert col.describe_null == (ColumnNullType.USE_SENTINEL, iNaT) - tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = from_dataframe(df.__dataframe__()) + tm.assert_frame_equal(df, res) @td.skip_if_np_lt("1.23") diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 60506aa2fbd0a..a1a00cf4caa90 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -115,6 +115,9 @@ def test_flatten_buffer(data): assert result.shape == (result.nbytes,) +@pytest.mark.filterwarnings( + "ignore:(DataFrame|Series).attrs is deprecated:FutureWarning" +) def test_pickles(datapath): if not is_platform_little_endian(): pytest.skip("known failure on non-little endian") @@ -583,8 +586,10 @@ def test_pickle_frame_v124_unpickle_130(datapath): "1.2.4", "empty_frame_v1_2_4-GH#42345.pkl", ) - with open(path, "rb") as fd: - df = pickle.load(fd) + msg = "DataFrame.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with open(path, "rb") as fd: + df = pickle.load(fd) expected = pd.DataFrame(index=[], columns=[]) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index ef02e9f7a465a..4008d2c1768e9 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -714,11 +714,15 @@ def test_concat_multiindex_with_empty_rangeindex(): def test_concat_drop_attrs(data): # GH#41828 df1 = 
data.copy() - df1.attrs = {1: 1} + msg = "(Series|DataFrame).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df1.attrs = {1: 1} df2 = data.copy() - df2.attrs = {1: 2} + with tm.assert_produces_warning(FutureWarning, match=msg): + df2.attrs = {1: 2} df = concat([df1, df2]) - assert len(df.attrs) == 0 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert len(df.attrs) == 0 @pytest.mark.parametrize( @@ -737,11 +741,15 @@ def test_concat_drop_attrs(data): def test_concat_retain_attrs(data): # GH#41828 df1 = data.copy() - df1.attrs = {1: 1} + msg = "(Series|DataFrame).attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df1.attrs = {1: 1} df2 = data.copy() - df2.attrs = {1: 1} + with tm.assert_produces_warning(FutureWarning, match=msg): + df2.attrs = {1: 1} df = concat([df1, df2]) - assert df.attrs[1] == 1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert df.attrs[1] == 1 @td.skip_array_manager_invalid_test diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index 71ce8541de24b..29004bccedb78 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -441,10 +441,13 @@ def test_astype_ea_to_datetimetzdtype(self, dtype): def test_astype_retain_attrs(self, any_numpy_dtype): # GH#44414 ser = Series([0, 1, 2, 3]) - ser.attrs["Location"] = "Michigan" + msg = "Series.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser.attrs["Location"] = "Michigan" - result = ser.astype(any_numpy_dtype).attrs - expected = ser.attrs + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.astype(any_numpy_dtype).attrs + expected = ser.attrs tm.assert_dict_equal(expected, result) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index e4e276af121f9..6100bb759d94f 100644 --- a/pandas/tests/series/test_api.py +++ 
b/pandas/tests/series/test_api.py @@ -161,10 +161,13 @@ def test_integer_series_size(self, dtype): def test_attrs(self): s = Series([0, 1], name="abc") - assert s.attrs == {} - s.attrs["version"] = 1 + msg = "Series.attrs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert s.attrs == {} + s.attrs["version"] = 1 result = s + 1 - assert result.attrs == {"version": 1} + with tm.assert_produces_warning(FutureWarning, match=msg): + assert result.attrs == {"version": 1} @skip_if_no("jinja2") def test_inspect_getmembers(self):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. xref #51280 * https://github.com/pandas-dev/pandas/issues/52166
https://api.github.com/repos/pandas-dev/pandas/pulls/52152
2023-03-24T01:39:23Z
2023-05-04T15:32:53Z
null
2023-05-04T15:32:59Z
Fix/mpl37 compat
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 49b92e0984713..54bd1c843da79 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1112,7 +1112,9 @@ def _get_subplots(self): from matplotlib.axes import Subplot return [ - ax for ax in self.axes[0].get_figure().get_axes() if isinstance(ax, Subplot) + ax + for ax in self.fig.get_axes() + if (isinstance(ax, Subplot) and ax.get_subplotspec() is not None) ] def _get_axes_layout(self) -> tuple[int, int]: diff --git a/pandas/tests/plotting/test_common.py b/pandas/tests/plotting/test_common.py index d4624cfc74872..faf8278675566 100644 --- a/pandas/tests/plotting/test_common.py +++ b/pandas/tests/plotting/test_common.py @@ -40,3 +40,22 @@ def test__gen_two_subplots_with_ax(self): subplot_geometry = list(axes[0].get_subplotspec().get_geometry()[:-1]) subplot_geometry[-1] += 1 assert subplot_geometry == [2, 1, 2] + + def test_colorbar_layout(self): + fig = self.plt.figure() + + axes = fig.subplot_mosaic( + """ + AB + CC + """ + ) + + x = [1, 2, 3] + y = [1, 2, 3] + + cs0 = axes["A"].scatter(x, y) + axes["B"].scatter(x, y) + + fig.colorbar(cs0, ax=[axes["A"], axes["B"]], location="right") + DataFrame(x).plot(ax=axes["C"])
This was reported via https://github.com/matplotlib/matplotlib/issues/25538 , I can not find an issue if @ocefpaf reported one. - [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [n/a] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [n/a] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/52150
2023-03-24T00:21:55Z
2023-03-24T17:06:29Z
2023-03-24T17:06:29Z
2023-06-25T23:48:10Z
DOC: Simplify pandas theme footer
diff --git a/doc/source/_static/css/pandas.css b/doc/source/_static/css/pandas.css index a08be3301edda..0957c340d673c 100644 --- a/doc/source/_static/css/pandas.css +++ b/doc/source/_static/css/pandas.css @@ -36,17 +36,6 @@ table { padding: 2.5rem 0rem 0.5rem 0rem; } -.intro-card .card-footer { - border: none; - background-color: transparent; -} - -.intro-card .card-footer p.card-text{ - max-width: 220px; - margin-left: auto; - margin-right: auto; -} - .card, .card img { background-color: transparent !important; } diff --git a/doc/source/conf.py b/doc/source/conf.py index c73a91aa90365..cd791455a28df 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -240,7 +240,7 @@ html_theme_options = { "external_links": [], - "footer_items": ["pandas_footer", "sphinx-version"], + "footer_start": ["pandas_footer", "sphinx-version"], "github_url": "https://github.com/pandas-dev/pandas", "twitter_url": "https://twitter.com/pandas_dev", "google_analytics_id": "UA-27880019-2", diff --git a/environment.yml b/environment.yml index f29ade1dc5173..80cfedb9c94d4 100644 --- a/environment.yml +++ b/environment.yml @@ -87,7 +87,7 @@ dependencies: - gitdb - natsort # DataFrame.sort_values doctest - numpydoc - - pydata-sphinx-theme<0.11 + - pydata-sphinx-theme>=0.13.0 - pytest-cython # doctest - sphinx - sphinx-panels diff --git a/requirements-dev.txt b/requirements-dev.txt index 9c0bdc64d6e07..a6762e9a2c7f9 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -62,7 +62,7 @@ gitpython gitdb natsort numpydoc -pydata-sphinx-theme<0.11 +pydata-sphinx-theme>=0.13.0 pytest-cython sphinx sphinx-panels
This pr is related to issue #51536. The next changes were made: 1. Changed a version of `pydata-sphinx-theme` in environment.yml from less than 0.11 to 0.13.0 2. Updated my environment by running mamba env update -n pandas-dev --file environment.yml 3. In conf.py I changed `html_theme_options`. Now "footer_start" is used instead of "footer_items” 4. In doc/source/_static/css/pandas.css where we override some aspects of `pydata-sphinx-theme` I removed `card-footer` 5. After cleaning and rebuilding documentation there is no footer in our html.
https://api.github.com/repos/pandas-dev/pandas/pulls/52149
2023-03-23T22:56:54Z
2023-05-15T20:50:57Z
null
2023-05-15T20:50:58Z
BLD: Fix pyproject.toml placement
diff --git a/MANIFEST.in b/MANIFEST.in index 361cd8ff9ec22..9cf8baf0d9501 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,5 @@ include RELEASE.md +include pyproject.toml include versioneer.py graft doc diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 15fa10b9e4289..c86fc05537267 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -177,7 +177,7 @@ def test_version_tag(): def test_pyproject_present(): # Check pyproject.toml is present(relevant for wheels) - pyproject_loc = os.path.join(os.path.dirname(__file__), "../../pyproject.toml") + pyproject_loc = os.path.join(os.path.dirname(__file__), "../pyproject.toml") assert os.path.exists(pyproject_loc) diff --git a/pyproject.toml b/pyproject.toml index 1bc530df74e87..82063ab626bf7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -125,9 +125,6 @@ include-package-data = true include = ["pandas", "pandas.*"] namespaces = false -[tool.setuptools.package-data] -pandas = ["../pyproject.toml"] - [tool.setuptools.exclude-package-data] "*" = ["*.c", "*.h"]
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Let's see if this works. I think this happened since I messed up the path in the test.
https://api.github.com/repos/pandas-dev/pandas/pulls/52148
2023-03-23T22:55:46Z
2023-03-23T23:03:07Z
null
2023-03-27T15:18:30Z